| column | values |
|---|---|
| commit | stringlengths 40-40 |
| old_file | stringlengths 4-118 |
| new_file | stringlengths 4-118 |
| old_contents | stringlengths 0-2.94k |
| new_contents | stringlengths 1-4.43k |
| subject | stringlengths 15-444 |
| message | stringlengths 16-3.45k |
| lang | stringclasses 1 value |
| license | stringclasses 13 values |
| repos | stringlengths 5-43.2k |
| prompt | stringlengths 17-4.58k |
| response | stringlengths 1-4.43k |
| prompt_tagged | stringlengths 58-4.62k |
| response_tagged | stringlengths 1-4.43k |
| text | stringlengths 132-7.29k |
| text_tagged | stringlengths 173-7.33k |
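All columns are plain strings, so rows can be loaded and inspected with the Hugging Face `datasets` library. The sketch below is a minimal example; the dataset path `"your-org/commit-dataset"` and the `train` split name are placeholders, not the real repository identifiers.

```python
# Minimal sketch: load the dataset and inspect one row.
# "your-org/commit-dataset" is a placeholder path, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/commit-dataset", split="train")

print(ds.column_names)            # commit, old_file, new_file, old_contents, ...
row = ds[0]
print(row["subject"])             # one-line commit subject
print(row["license"])             # e.g. "apache-2.0"
print(row["new_contents"][:200])  # start of the post-commit file contents
```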
commit: 3b182032ae092560b2423e77f657ec0794ce38e6
old_file: planetstack/model_policies/model_policy_User.py
new_file: planetstack/model_policies/model_policy_User.py
old_contents: (empty)
new_contents:
from core.models import *
def handle(user):
deployments = Deployment.objects.all()
site_deployments = SiteDeployments.objects.all()
site_deploy_lookup = defaultdict(list)
for site_deployment in site_deployments:
site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
user_deploy_lookup = defaultdict(list)
for user_deployment in UserDeployments.objects.all():
user_deploy_lookup[user_deployment.user].append(user_deployment.deployment)
all_deployments = Deployment.objects.filter()
if user.is_admin:
# admins should have an account at all deployments
expected_deployments = deployments
else:
# normal users should have an account at their site's deployments
#expected_deployments = site_deploy_lookup[user.site]
# users are added to all deployments for now
expected_deployments = deployments
for expected_deployment in expected_deployments:
if not user in user_deploy_lookup or \
expected_deployment not in user_deploy_lookup[user]:
# add new record
ud = UserDeployments(user=user, deployment=expected_deployment)
ud.save()
subject: Add new users to all deployments
message: Policy: Add new users to all deployments
lang: Python
license: apache-2.0
repos:
cboling/xos,wathsalav/xos,cboling/xos,open-cloud/xos,opencord/xos,xmaruto/mcord,open-cloud/xos,wathsalav/xos,opencord/xos,wathsalav/xos,opencord/xos,zdw/xos,jermowery/xos,jermowery/xos,zdw/xos,zdw/xos,cboling/xos,wathsalav/xos,open-cloud/xos,xmaruto/mcord,zdw/xos,jermowery/xos,xmaruto/mcord,xmaruto/mcord,cboling/xos,cboling/xos,jermowery/xos
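In the rows shown here, the derived columns follow a fixed pattern: `prompt` repeats `message`, `response` repeats `new_contents`, and the `*_tagged` and `text` columns concatenate the base fields around `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers. A small illustrative sketch of that composition, written against the column names from the schema above (the helper name is ours, not part of the dataset):

```python
# Sketch: how the tagged columns in these rows line up with the base columns.
def build_text_tagged(row: dict) -> str:
    # <commit_before> carries the pre-commit file (empty for newly added files),
    # <commit_msg> carries the commit message, <commit_after> the new file.
    return (
        "<commit_before>" + row["old_contents"]
        + "<commit_msg>" + row["message"]
        + "<commit_after>" + row["new_contents"]
    )
```

For this first row `old_contents` is empty, so the tagged text begins with `<commit_before><commit_msg>Policy: Add new users to all deployments<commit_after>` followed by the new file contents.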
commit: 7e7bd440a1e3f585464df3458070528d0100d456
old_file: pyseidon/handlers/__init__.py
new_file: pyseidon/handlers/__init__.py
old_contents: (empty)
new_contents:
import pyseidon
import sys
def handle_script():
import runpy
"""
Allow the client to run an arbitrary Python script.
Here's sample usage:
```
def expensive_setup():
...
if __name__ == '__main__':
expensive_setup()
import pyseidon.handlers
pyseidon.handlers.handle_script()
```
"""
def handler():
if len(sys.argv) < 1:
print >>sys.stderr, 'Must provide path to Python script to execute'
sys.exit(1)
runpy.run_path(sys.argv[0], run_name='__main__')
master = pyseidon.Pyseidon()
master.run(handler)
subject: Add helper to run requested Python script
message: Add helper to run requested Python script
lang: Python
license: mit
repos:
gdb/pyseidon,gdb/pyseidon
commit: 87172e2b9e0143cf164dc34c26c69fc4eda7dd1e
old_file: seleniumbase/config/ad_block_list.py
new_file: seleniumbase/config/ad_block_list.py
old_contents: (empty)
new_contents:
"""
For use with SeleniumBase ad_block functionality.
Usage:
On the command line:
"pytest SOME_TEST.py --ad_block"
From inside a test:
self.ad_block()
If using the command line version, the ad_block functionality gets
activated after "self.wait_for_ready_state_complete()" is called,
which is always run after page loads, unless changed in "settings.py".
Using ad_block will slow down test runs a little. (Use only if necessary.)
Format: A CSS Selector that's ready for JavaScript's querySelectorAll()
"""
AD_BLOCK_LIST = [
'[aria-label="Ad"]',
'[class^="sponsored-content"]',
'[data-ad-details*="Advertisement"]',
'[data-native_ad*="placement"]',
'[data-provider="dianomi"]',
'[data-type="ad"]',
'[data-track-event-label*="-taboola-"]',
'[href*="doubleclick.net/"]',
'[id*="-ad-"]',
'[id*="_ads_"]',
'[id*="AdFrame"]',
'[id^="ad-"]',
'[id^="outbrain_widget"]',
'[id^="taboola-"]',
'[id="dianomiRightRail"]',
'[src*="smartads."]',
'[src*="ad_nexus"]',
'[src*="/ads/"]',
'[data-dcm-click-tracker*="/adclick."]',
'[data-google-query-id^="C"]',
'div.ad-container',
'div.ad_module',
'div.ad-subnav-container',
'div.ad-wrapper',
'div.data-ad-container',
'div.l-ad',
'div.right-ad',
'div.wx-adWrapper',
'img.img_ad',
'link[href*="/adservice."]',
'script[src*="/adservice."]',
'script[src*="/pagead/"]',
'section.dianomi-ad',
]
subject: Add initial block list for ad_block functionality
message: Add initial block list for ad_block functionality
lang: Python
license: mit
repos:
seleniumbase/SeleniumBase,mdmintz/seleniumspot,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/seleniumspot,mdmintz/SeleniumBase
commit: 2d4f09fe8c31aa2b996e71565292d5ef249986c7
old_file: tools/gyp-explain.py
new_file: tools/gyp-explain.py
old_contents: (empty)
new_contents:
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints paths between gyp targets.
"""
import json
import os
import sys
import time
from collections import deque
def usage():
print """\
Usage:
tools/gyp-explain.py chrome_dll gtest#
"""
def GetPath(graph, fro, to):
"""Given a graph in (node -> list of successor nodes) dictionary format,
yields all paths from |fro| to |to|, starting with the shortest."""
# Storing full paths in the queue is a bit wasteful, but good enough for this.
q = deque([(fro, [])])
while q:
t, path = q.popleft()
if t == to:
yield path + [t]
for d in graph[t]:
q.append((d, path + [t]))
def MatchNode(graph, substring):
"""Given a dictionary, returns the key that matches |substring| best. Exits
if there's not one single best match."""
candidates = []
for target in graph:
if substring in target:
candidates.append(target)
if not candidates:
print 'No targets match "%s"' % substring
sys.exit(1)
if len(candidates) > 1:
print 'More than one target matches "%s": %s' % (
substring, ' '.join(candidates))
sys.exit(1)
return candidates[0]
def Main(argv):
# Check that dump.json exists and that it's not too old.
dump_json_dirty = False
try:
st = os.stat('dump.json')
file_age_s = time.time() - st.st_mtime
if file_age_s > 2 * 60 * 60:
print 'dump.json is more than 2 hours old.'
dump_json_dirty = True
except IOError:
print 'dump.json not found.'
dump_json_dirty = True
if dump_json_dirty:
print 'Run'
print ' GYP_GENERATORS=dump_dependency_json build/gyp_chromium'
print 'first, then try again.'
sys.exit(1)
g = json.load(open('dump.json'))
if len(argv) != 3:
usage()
sys.exit(1)
fro = MatchNode(g, argv[1])
to = MatchNode(g, argv[2])
paths = list(GetPath(g, fro, to))
if len(paths) > 0:
print 'These paths lead from %s to %s:' % (fro, to)
for path in paths:
print ' -> '.join(path)
else:
print 'No paths found from %s to %s.' % (fro, to)
if __name__ == '__main__':
Main(sys.argv)
subject: Add a small tool to answer questions like "Why does target A depend on target B".
message:
  Add a small tool to answer questions like "Why does target A depend on target B".
  BUG=none
  TEST=none
  Review URL: http://codereview.chromium.org/8672006
  git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@111430 0039d316-1c4b-4281-b951-d872f2087c98
lang: Python
license: bsd-3-clause
repos:
yitian134/chromium,yitian134/chromium,ropik/chromium,ropik/chromium,ropik/chromium,ropik/chromium,gavinp/chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,yitian134/chromium,ropik/chromium,ropik/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,adobe/chromium,yitian134/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,yitian134/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,yitian134/chromium
commit: 5e32d890fe0762163f1edab6672df91e7d461d8f
old_file: check-if-a-given-sequence-of-moves-for-a-robot-is-circular-or-not.py
new_file: check-if-a-given-sequence-of-moves-for-a-robot-is-circular-or-not.py
old_contents: (empty)
new_contents:
from operator import add
import math
moves = raw_input("Enter the moves: ")
start_position = [0,0]
current_position = [0,0]
'''
heading = [1,90] - 1 step North
[1, -90] - 1 step South
[1,0] - East
[1,360] - West
'''
heading = [1,0]
for move in moves:
if move.upper() == "G":
angle = heading[1]
step = heading[0]
move_coord = [ round(step*math.cos(math.radians(angle))), round(step*math.sin(math.radians(angle))) ]
current_position = map(add, current_position, move_coord)
elif move.upper() == "L":
heading = map(add, heading, [0, 90])
elif move.upper() == "R":
heading = map(add, heading, [0, -90])
if start_position == current_position:
print "Given sequence of moves is circular"
else:
print "Given sequence of moves is NOT circular"
subject: Check if a given sequence of moves for a robot is circular or not (py)
message: [New] :: Check if a given sequence of moves for a robot is circular or not (py)
lang: Python
license: apache-2.0
repos:
MayankAgarwal/GeeksForGeeks
commit: 72af391ec00facfbabc8ac89ff3bea1b54799d97
old_file: htdocs/plotting/auto/scripts/p50.py
new_file: htdocs/plotting/auto/scripts/p50.py
old_contents: (empty)
new_contents:
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import psycopg2.extras
import pyiem.nws.vtec as vtec
from pyiem.network import Table as NetworkTable
import numpy as np
import pytz
PDICT = {
"hadgem=a1b": "HADGEM A1B",
"cnrm=a1b" : "CNRM A1B",
"echam5=a1b" : "ECHAM5 A1B",
"echo=a1b" : "ECHO A1B",
"pcm=a1b" : "PCM A1B",
"miroc_hi=a1b": "MIROC_HI A1B",
"cgcm3_t47=a1b": "CGCM3_T47 A1B",
"giss_aom=a1b": "GISS_AOM A1B",
"hadcm3=a1b": "HADCM3 A1B",
"cgcm3_t63=a1b": "CGCM3_T63 A1B",
}
def get_description():
""" Return a dict describing how to call this plotter """
d = dict()
d['cache'] = 86400
d['description'] = """ """
d['arguments'] = [
dict(type='networkselect', name='station', network='CSCAP',
default='ISUAG', label='Select CSCAP Site:'),
dict(type='select', name='model', default='echo=a1b',
label='Select Model:', options=PDICT)
]
return d
def plotter( fdict ):
""" Go """
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
station = fdict.get('station', 'ISUAG')
nt = NetworkTable("CSCAP")
clstation = nt.sts[station]['climate_site']
(model, scenario) = fdict.get('model', 'hadgem=a1b').split("=")
(fig, ax) = plt.subplots(1, 1)
cursor.execute("""
SELECT extract(year from day) as yr, sum(case when precip > 0
THEN 1 else 0 end) from hayhoe_daily WHERE precip is not null and
station = %s and model = %s and scenario = %s
GROUP by yr ORDER by yr ASC
""", (clstation, model, scenario))
years = []
precip = []
for row in cursor:
years.append(row[0])
precip.append(row[1])
ax.bar(years, precip, ec='b', fc='b')
ax.grid(True)
ax.set_ylabel("Days Per Year")
ax.set_title("%s %s\n%s %s :: Days per Year with Measureable Precip" % (
station, nt.sts[station]['name'], model,
scenario))
return fig
subject: Add plot of days per year with precip
message: Add plot of days per year with precip
lang: Python
license: mit
repos:
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
commit: 6e601d9720139bbb04c1fd30dc6552730270ba0a
old_file: setup.py
new_file: setup.py
old_contents:
#!/usr/bin/env python
from setuptools import setup, find_packages
from billy import __version__
long_description = open('README.rst').read()
setup(name='billy',
version=__version__,
packages=find_packages(),
package_data={'billy': ['schemas/*.json',
'schemas/api/*.json',
'schemas/relax/api.rnc'],
'billy.web.admin': ['templates/billy/*.html'],
},
author="James Turk",
author_email="jturk@sunlightfoundation.com",
license="GPL v3",
url="http://github.com/sunlightlabs/billy/",
description='scraping, storing, and sharing legislative information',
long_description=long_description,
platforms=['any'],
entry_points="""
[console_scripts]
billy-scrape = billy.bin.update:scrape_compat_main
billy-update = billy.bin.update:main
billy-util = billy.bin.util:main
""",
install_requires=[
"Django==1.4",
"argparse==1.1",
"boto",
"django-piston",
"icalendar",
"lxml>=2.2",
"name_tools>=0.1.2",
"nose",
"pymongo>=2.2",
"scrapelib>=0.7.0",
"unicodecsv",
"validictory>=0.7.1",
"pyes",
]
)
new_contents:
#!/usr/bin/env python
from setuptools import setup, find_packages
from billy import __version__
long_description = open('README.rst').read()
setup(name='billy',
version=__version__,
packages=find_packages(),
package_data={'billy': ['schemas/*.json',
'schemas/api/*.json',
'schemas/relax/api.rnc'],
'billy.web.admin': ['templates/billy/*.html'],
},
author="James Turk",
author_email="jturk@sunlightfoundation.com",
license="GPL v3",
url="http://github.com/sunlightlabs/billy/",
description='scraping, storing, and sharing legislative information',
long_description=long_description,
platforms=['any'],
entry_points="""
[console_scripts]
billy-scrape = billy.bin.update:scrape_compat_main
billy-update = billy.bin.update:main
billy-util = billy.bin.util:main
""",
install_requires=[
"Django>=1.4",
"argparse==1.1",
"boto",
"django-piston",
"icalendar",
"lxml>=2.2",
"name_tools>=0.1.2",
"nose",
"pymongo>=2.2",
"scrapelib>=0.7.0",
"unicodecsv",
"validictory>=0.7.1",
"pyes",
]
)
subject: Fix the versioned Django, we're grabbing 1.4.1 off the requirements.txt
message: Fix the versioned Django, we're grabbing 1.4.1 off the requirements.txt
lang: Python
license: bsd-3-clause
repos:
sunlightlabs/billy,loandy/billy,mileswwatkins/billy,sunlightlabs/billy,openstates/billy,loandy/billy,loandy/billy,openstates/billy,mileswwatkins/billy,mileswwatkins/billy,sunlightlabs/billy,openstates/billy
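Rows like this one carry both the pre-commit and post-commit file, so the actual change (here, `Django==1.4` loosened to `Django>=1.4`) can be reconstructed as a unified diff from the two content columns. A minimal sketch with the standard library, assuming `row` is a record dict loaded as in the earlier example:

```python
# Sketch: rebuild this commit's diff from old_contents / new_contents.
import difflib

diff = difflib.unified_diff(
    row["old_contents"].splitlines(keepends=True),
    row["new_contents"].splitlines(keepends=True),
    fromfile=row["old_file"],
    tofile=row["new_file"],
)
print("".join(diff))
```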
commit: cec3eebace1ad5f236761bdd98bef0d5ac52d3ba
old_file: cura/Settings/MaterialSettingsVisibilityHandler.py
new_file: cura/Settings/MaterialSettingsVisibilityHandler.py
old_contents:
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Settings.Models.SettingVisibilityHandler import SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = set([
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
])
self.setVisible(material_settings)
new_contents:
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Settings.Models.SettingVisibilityHandler import SettingVisibilityHandler
class MaterialSettingsVisibilityHandler(SettingVisibilityHandler):
def __init__(self, parent = None, *args, **kwargs):
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
subject: Replace list-to-set cast with normal set literal
message:
  Replace list-to-set cast with normal set literal
  Don't know who did this but he did wrong, yo.
lang: Python
license: agpl-3.0
repos:
hmflash/Cura,fieldOfView/Cura,Curahelper/Cura,ynotstartups/Wanhao,ynotstartups/Wanhao,Curahelper/Cura,fieldOfView/Cura,hmflash/Cura
super().__init__(parent = parent, *args, **kwargs)
material_settings = {
"default_material_print_temperature",
"material_bed_temperature",
"material_standby_temperature",
"cool_fan_speed",
"retraction_amount",
"retraction_speed",
}
self.setVisible(material_settings)
|
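The Cura change above replaces set([...]) with a set literal; both produce the same set, the literal just skips building and discarding an intermediate list. A small standalone check:
# Both forms build an identical set; the literal avoids the throwaway list
# that set([...]) constructs first.
from_list = set(["retraction_amount", "retraction_speed"])
literal = {"retraction_amount", "retraction_speed"}
assert from_list == literal and isinstance(literal, set)
import dis
dis.dis('set(["a", "b"])')  # compiled as a list plus a call to set()
dis.dis('{"a", "b"}')       # compiled directly as a set construction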
4c46b7b86171b89f0c85f6d48eaf6d24e702c6f9
|
samples/service_account/tasks.py
|
samples/service_account/tasks.py
|
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample that demonstrates service accounts.
Lists all the Google Task Lists associated with the given service account.
Service accounts are created in the Google API Console. See the documentation
for more information:
https://developers.google.com/console/help/#WhatIsKey
Usage:
$ python tasks.py
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pprint
import sys
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
def main(argv):
# Load the key in PKCS 12 format that you downloaded from the Google API
# Console when you created your Service account.
f = file('key.p12', 'rb')
key = f.read()
f.close()
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with the Credentials. Note that the first parameter, service_account_name,
# is the Email address created for the Service account. It must be the email
# address associated with the key that was created.
credentials = SignedJwtAssertionCredentials(
'141491975384@developer.gserviceaccount.com',
key,
scope='https://www.googleapis.com/auth/tasks')
http = httplib2.Http()
http = credentials.authorize(http)
service = build("tasks", "v1", http=http)
# List all the tasklists for the account.
lists = service.tasklists().list().execute(http)
pprint.pprint(lists)
if __name__ == '__main__':
main(sys.argv)
|
Add a Tasks sample that demonstrates Service accounts.
|
Add a Tasks sample that demonstrates Service accounts.
Reviewed in http://codereview.appspot.com/5685068/.
Index: samples/service_account/books.py
===================================================================
new file mode 100644
|
Python
|
apache-2.0
|
jonparrott/oauth2client,clancychilds/oauth2client,googleapis/oauth2client,jonparrott/oauth2client,clancychilds/oauth2client,googleapis/google-api-python-client,googleapis/google-api-python-client,googleapis/oauth2client,google/oauth2client,google/oauth2client
|
Add a Tasks sample that demonstrates Service accounts.
Reviewed in http://codereview.appspot.com/5685068/.
Index: samples/service_account/books.py
===================================================================
new file mode 100644
|
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample that demonstrates service accounts.
Lists all the Google Task Lists associated with the given service account.
Service accounts are created in the Google API Console. See the documentation
for more information:
https://developers.google.com/console/help/#WhatIsKey
Usage:
$ python tasks.py
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pprint
import sys
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
def main(argv):
# Load the key in PKCS 12 format that you downloaded from the Google API
# Console when you created your Service account.
f = file('key.p12', 'rb')
key = f.read()
f.close()
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with the Credentials. Note that the first parameter, service_account_name,
# is the Email address created for the Service account. It must be the email
# address associated with the key that was created.
credentials = SignedJwtAssertionCredentials(
'141491975384@developer.gserviceaccount.com',
key,
scope='https://www.googleapis.com/auth/tasks')
http = httplib2.Http()
http = credentials.authorize(http)
service = build("tasks", "v1", http=http)
# List all the tasklists for the account.
lists = service.tasklists().list().execute(http)
pprint.pprint(lists)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add a Tasks sample that demonstrates Service accounts.
Reviewed in http://codereview.appspot.com/5685068/.
Index: samples/service_account/books.py
===================================================================
new file mode 100644<commit_after>
|
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample that demonstrates service accounts.
Lists all the Google Task Lists associated with the given service account.
Service accounts are created in the Google API Console. See the documentation
for more information:
https://developers.google.com/console/help/#WhatIsKey
Usage:
$ python tasks.py
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pprint
import sys
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
def main(argv):
# Load the key in PKCS 12 format that you downloaded from the Google API
# Console when you created your Service account.
f = file('key.p12', 'rb')
key = f.read()
f.close()
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with the Credentials. Note that the first parameter, service_account_name,
# is the Email address created for the Service account. It must be the email
# address associated with the key that was created.
credentials = SignedJwtAssertionCredentials(
'141491975384@developer.gserviceaccount.com',
key,
scope='https://www.googleapis.com/auth/tasks')
http = httplib2.Http()
http = credentials.authorize(http)
service = build("tasks", "v1", http=http)
# List all the tasklists for the account.
lists = service.tasklists().list().execute(http)
pprint.pprint(lists)
if __name__ == '__main__':
main(sys.argv)
|
Add a Tasks sample that demonstrates Service accounts.
Reviewed in http://codereview.appspot.com/5685068/.
Index: samples/service_account/books.py
===================================================================
new file mode 100644#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample that demonstrates service accounts.
Lists all the Google Task Lists associated with the given service account.
Service accounts are created in the Google API Console. See the documentation
for more information:
https://developers.google.com/console/help/#WhatIsKey
Usage:
$ python tasks.py
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pprint
import sys
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
def main(argv):
# Load the key in PKCS 12 format that you downloaded from the Google API
# Console when you created your Service account.
f = file('key.p12', 'rb')
key = f.read()
f.close()
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with the Credentials. Note that the first parameter, service_account_name,
# is the Email address created for the Service account. It must be the email
# address associated with the key that was created.
credentials = SignedJwtAssertionCredentials(
'141491975384@developer.gserviceaccount.com',
key,
scope='https://www.googleapis.com/auth/tasks')
http = httplib2.Http()
http = credentials.authorize(http)
service = build("tasks", "v1", http=http)
# List all the tasklists for the account.
lists = service.tasklists().list().execute(http)
pprint.pprint(lists)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add a Tasks sample that demonstrates Service accounts.
Reviewed in http://codereview.appspot.com/5685068/.
Index: samples/service_account/books.py
===================================================================
new file mode 100644<commit_after>#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample that demonstrates service accounts.
Lists all the Google Task Lists associated with the given service account.
Service accounts are created in the Google API Console. See the documentation
for more information:
https://developers.google.com/console/help/#WhatIsKey
Usage:
$ python tasks.py
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pprint
import sys
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
def main(argv):
# Load the key in PKCS 12 format that you downloaded from the Google API
# Console when you created your Service account.
f = file('key.p12', 'rb')
key = f.read()
f.close()
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with the Credentials. Note that the first parameter, service_account_name,
# is the Email address created for the Service account. It must be the email
# address associated with the key that was created.
credentials = SignedJwtAssertionCredentials(
'141491975384@developer.gserviceaccount.com',
key,
scope='https://www.googleapis.com/auth/tasks')
http = httplib2.Http()
http = credentials.authorize(http)
service = build("tasks", "v1", http=http)
# List all the tasklists for the account.
lists = service.tasklists().list().execute(http)
pprint.pprint(lists)
if __name__ == '__main__':
main(sys.argv)
|
|
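The sample above relies on oauth2client's SignedJwtAssertionCredentials and a PKCS12 key, both long since deprecated. A rough sketch of the same flow with the newer google-auth packages, assuming a JSON key file; the file name and scope here are illustrative, not taken from the sample:
# Sketch only: assumes google-auth and google-api-python-client are installed
# and that the service account key was downloaded in JSON form ("key.json" is
# an illustrative path).
from google.oauth2 import service_account
from googleapiclient.discovery import build
credentials = service_account.Credentials.from_service_account_file(
    "key.json",
    scopes=["https://www.googleapis.com/auth/tasks"])
service = build("tasks", "v1", credentials=credentials)
print(service.tasklists().list().execute())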
a3f23b804265bd59473873c2aa071188a73a9a9e
|
slumba/tests/test_numbaext.py
|
slumba/tests/test_numbaext.py
|
import pytest
from numba import boolean, njit, int64, TypingError
from slumba.numbaext import not_null, sizeof, unsafe_cast
def test_sizeof_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_sizeof(x):
return sizeof(x)
def test_not_null_invalid():
dec = njit(boolean(int64))
with pytest.raises(TypingError):
@dec
def bad_not_null(x):
return not_null(x)
def test_unsafe_cast_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_unsafe_cast(x):
return unsafe_cast(x, int64)
|
Test fail cases for custom codegen
|
Test fail cases for custom codegen
|
Python
|
apache-2.0
|
cpcloud/slumba,cpcloud/slumba
|
Test fail cases for custom codegen
|
import pytest
from numba import boolean, njit, int64, TypingError
from slumba.numbaext import not_null, sizeof, unsafe_cast
def test_sizeof_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_sizeof(x):
return sizeof(x)
def test_not_null_invalid():
dec = njit(boolean(int64))
with pytest.raises(TypingError):
@dec
def bad_not_null(x):
return not_null(x)
def test_unsafe_cast_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_unsafe_cast(x):
return unsafe_cast(x, int64)
|
<commit_before><commit_msg>Test fail cases for custom codegen<commit_after>
|
import pytest
from numba import boolean, njit, int64, TypingError
from slumba.numbaext import not_null, sizeof, unsafe_cast
def test_sizeof_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_sizeof(x):
return sizeof(x)
def test_not_null_invalid():
dec = njit(boolean(int64))
with pytest.raises(TypingError):
@dec
def bad_not_null(x):
return not_null(x)
def test_unsafe_cast_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_unsafe_cast(x):
return unsafe_cast(x, int64)
|
Test fail cases for custom codegenimport pytest
from numba import boolean, njit, int64, TypingError
from slumba.numbaext import not_null, sizeof, unsafe_cast
def test_sizeof_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_sizeof(x):
return sizeof(x)
def test_not_null_invalid():
dec = njit(boolean(int64))
with pytest.raises(TypingError):
@dec
def bad_not_null(x):
return not_null(x)
def test_unsafe_cast_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_unsafe_cast(x):
return unsafe_cast(x, int64)
|
<commit_before><commit_msg>Test fail cases for custom codegen<commit_after>import pytest
from numba import boolean, njit, int64, TypingError
from slumba.numbaext import not_null, sizeof, unsafe_cast
def test_sizeof_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_sizeof(x):
return sizeof(x)
def test_not_null_invalid():
dec = njit(boolean(int64))
with pytest.raises(TypingError):
@dec
def bad_not_null(x):
return not_null(x)
def test_unsafe_cast_invalid():
dec = njit(int64(int64))
with pytest.raises(TypingError):
@dec
def bad_unsafe_cast(x):
return unsafe_cast(x, int64)
|
|
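The tests above work because njit with an explicit signature compiles eagerly, so a typing failure escapes while the decorator is being applied, inside the pytest.raises block. A library-free sketch of that shape, using a toy decorator in place of numba:
# Generic sketch: assert that a failure happens at decoration time rather than
# at call time, the same structure as the TypingError tests above.
import pytest
def strict(func):
    # Toy stand-in for an eager JIT: reject functions it cannot "compile".
    if func.__name__.startswith("bad"):
        raise TypeError("refusing to compile %s" % func.__name__)
    return func
def test_decoration_failure_is_caught():
    with pytest.raises(TypeError):
        @strict
        def bad_example(x):
            return x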
a1ae01bada1d500bd7f9f7f0f2deb458bfa6d2d1
|
bin/serial_test.py
|
bin/serial_test.py
|
#!/usr/bin/env python
from serial import Serial
from time import sleep
ser = Serial('/dev/ttyUSB0', 9600)
sleep(3) # wait for the board to reset
print "start"
print "write"
ser.write("hello\n")
print "read"
line = ser.readline()
print "GOT %s"%line
print "write world..."
ser.write("world\n")
print "read"
line = ser.readline()
print "GOT %s"%line
line = ser.readline()
print "GOT %s"%line
cmd = ""
while not cmd == "q":
cmd = raw_input(">> ")
ser.write(cmd+"\n")
out = ser.readline()
out = ser.readline()
print out
|
Add the serial python test
|
Add the serial python test
|
Python
|
apache-2.0
|
Pitchless/arceye,Pitchless/arceye
|
Add the serial python test
|
#!/usr/bin/env python
from serial import Serial
from time import sleep
ser = Serial('/dev/ttyUSB0', 9600)
sleep(3) # wait for the board to reset
print "start"
print "write"
ser.write("hello\n")
print "read"
line = ser.readline()
print "GOT %s"%line
print "write world..."
ser.write("world\n")
print "read"
line = ser.readline()
print "GOT %s"%line
line = ser.readline()
print "GOT %s"%line
cmd = ""
while not cmd == "q":
cmd = raw_input(">> ")
ser.write(cmd+"\n")
out = ser.readline()
out = ser.readline()
print out
|
<commit_before><commit_msg>Add the serial python test<commit_after>
|
#!/usr/bin/env python
from serial import Serial
from time import sleep
ser = Serial('/dev/ttyUSB0', 9600)
sleep(3) # wait for the board to reset
print "start"
print "write"
ser.write("hello\n")
print "read"
line = ser.readline()
print "GOT %s"%line
print "write world..."
ser.write("world\n")
print "read"
line = ser.readline()
print "GOT %s"%line
line = ser.readline()
print "GOT %s"%line
cmd = ""
while not cmd == "q":
cmd = raw_input(">> ")
ser.write(cmd+"\n")
out = ser.readline()
out = ser.readline()
print out
|
Add the serial python test#!/usr/bin/env python
from serial import Serial
from time import sleep
ser = Serial('/dev/ttyUSB0', 9600)
sleep(3) # wait for the board to reset
print "start"
print "write"
ser.write("hello\n")
print "read"
line = ser.readline()
print "GOT %s"%line
print "write world..."
ser.write("world\n")
print "read"
line = ser.readline()
print "GOT %s"%line
line = ser.readline()
print "GOT %s"%line
cmd = ""
while not cmd == "q":
cmd = raw_input(">> ")
ser.write(cmd+"\n")
out = ser.readline()
out = ser.readline()
print out
|
<commit_before><commit_msg>Add the serial python test<commit_after>#!/usr/bin/env python
from serial import Serial
from time import sleep
ser = Serial('/dev/ttyUSB0', 9600)
sleep(3) # wait for the board to reset
print "start"
print "write"
ser.write("hello\n")
print "read"
line = ser.readline()
print "GOT %s"%line
print "write world..."
ser.write("world\n")
print "read"
line = ser.readline()
print "GOT %s"%line
line = ser.readline()
print "GOT %s"%line
cmd = ""
while not cmd == "q":
cmd = raw_input(">> ")
ser.write(cmd+"\n")
out = ser.readline()
out = ser.readline()
print out
|
|
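The script above is Python 2 (bare print statements, implicit text I/O). A rough Python 3 equivalent, keeping the device path and baud rate from the original as assumptions; pyserial 3.x expects bytes on the wire, and a read timeout stops readline() from blocking forever:
#!/usr/bin/env python3
# Python 3 sketch of the same echo test; /dev/ttyUSB0 and 9600 baud are
# carried over from the script above, not verified here.
from time import sleep
from serial import Serial
ser = Serial("/dev/ttyUSB0", 9600, timeout=2)
sleep(3)  # wait for the board to reset after the port opens
ser.write(b"hello\n")  # bytes, not str
print("GOT %s" % ser.readline().decode("ascii", errors="replace").strip())
cmd = ""
while cmd != "q":
    cmd = input(">> ")
    ser.write((cmd + "\n").encode("ascii"))
    print(ser.readline().decode("ascii", errors="replace").strip())
ser.close()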
1f043dd959fa1e1d243a3278abeb66838a2f9305
|
server/auvsi_suas/migrations/0013_remove_ir_as_target_type.py
|
server/auvsi_suas/migrations/0013_remove_ir_as_target_type.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0012_missionclockevent'), ]
operations = [
migrations.AlterField(
model_name='target',
name='target_type',
field=models.IntegerField(choices=[(1, b'standard'), (2, b'qrc'), (
3, b'off_axis'), (4, b'emergent')]), ),
]
|
Remove the IR target type in migration.
|
Remove the IR target type in migration.
|
Python
|
apache-2.0
|
justineaster/interop,auvsi-suas/interop,justineaster/interop,justineaster/interop,auvsi-suas/interop,justineaster/interop,auvsi-suas/interop,auvsi-suas/interop,justineaster/interop
|
Remove the IR target type in migration.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0012_missionclockevent'), ]
operations = [
migrations.AlterField(
model_name='target',
name='target_type',
field=models.IntegerField(choices=[(1, b'standard'), (2, b'qrc'), (
3, b'off_axis'), (4, b'emergent')]), ),
]
|
<commit_before><commit_msg>Remove the IR target type in migration.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0012_missionclockevent'), ]
operations = [
migrations.AlterField(
model_name='target',
name='target_type',
field=models.IntegerField(choices=[(1, b'standard'), (2, b'qrc'), (
3, b'off_axis'), (4, b'emergent')]), ),
]
|
Remove the IR target type in migration.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0012_missionclockevent'), ]
operations = [
migrations.AlterField(
model_name='target',
name='target_type',
field=models.IntegerField(choices=[(1, b'standard'), (2, b'qrc'), (
3, b'off_axis'), (4, b'emergent')]), ),
]
|
<commit_before><commit_msg>Remove the IR target type in migration.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auvsi_suas', '0012_missionclockevent'), ]
operations = [
migrations.AlterField(
model_name='target',
name='target_type',
field=models.IntegerField(choices=[(1, b'standard'), (2, b'qrc'), (
3, b'off_axis'), (4, b'emergent')]), ),
]
|
|
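The AlterField above is the kind of migration makemigrations generates after a choice is removed from a model field; a minimal sketch of the model-side definition that drives it, with illustrative names rather than the project's real model:
# Sketch of a model whose choices edit produces an AlterField migration like
# the one above; class and field names are illustrative.
from django.db import models
class Target(models.Model):
    TARGET_TYPES = (
        (1, 'standard'),
        (2, 'qrc'),
        (3, 'off_axis'),
        (4, 'emergent'),
        # deleting an entry here and rerunning makemigrations emits AlterField
    )
    target_type = models.IntegerField(choices=TARGET_TYPES)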
f301dd2366f53a6cf4b0949942b8520502f54351
|
boxsdk/__init__.py
|
boxsdk/__init__.py
|
# coding: utf-8
from __future__ import unicode_literals
from .auth.jwt_auth import JWTAuth
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
|
# coding: utf-8
from __future__ import unicode_literals
try:
from .auth.jwt_auth import JWTAuth
except ImportError:
JWTAuth = None # If extras are not installed, JWTAuth won't be available.
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
|
Fix import error when [jwt] not installed.
|
Fix import error when [jwt] not installed.
|
Python
|
apache-2.0
|
Tusky/box-python-sdk,sanketdjain/box-python-sdk,sanketdjain/box-python-sdk,Frencil/box-python-sdk,samkuehn/box-python-sdk,lkabongoVC/box-python-sdk,Frencil/box-python-sdk,box/box-python-sdk,lkabongoVC/box-python-sdk,samkuehn/box-python-sdk
|
# coding: utf-8
from __future__ import unicode_literals
from .auth.jwt_auth import JWTAuth
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
Fix import error when [jwt] not installed.
|
# coding: utf-8
from __future__ import unicode_literals
try:
from .auth.jwt_auth import JWTAuth
except ImportError:
JWTAuth = None # If extras are not installed, JWTAuth won't be available.
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
|
<commit_before># coding: utf-8
from __future__ import unicode_literals
from .auth.jwt_auth import JWTAuth
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
<commit_msg>Fix import error when [jwt] not installed.<commit_after>
|
# coding: utf-8
from __future__ import unicode_literals
try:
from .auth.jwt_auth import JWTAuth
except ImportError:
JWTAuth = None # If extras are not installed, JWTAuth won't be available.
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
|
# coding: utf-8
from __future__ import unicode_literals
from .auth.jwt_auth import JWTAuth
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
Fix import error when [jwt] not installed.# coding: utf-8
from __future__ import unicode_literals
try:
from .auth.jwt_auth import JWTAuth
except ImportError:
JWTAuth = None # If extras are not installed, JWTAuth won't be available.
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
|
<commit_before># coding: utf-8
from __future__ import unicode_literals
from .auth.jwt_auth import JWTAuth
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
<commit_msg>Fix import error when [jwt] not installed.<commit_after># coding: utf-8
from __future__ import unicode_literals
try:
from .auth.jwt_auth import JWTAuth
except ImportError:
JWTAuth = None # If extras are not installed, JWTAuth won't be available.
from .auth.oauth2 import OAuth2
from .client import Client
from .object import * # pylint:disable=wildcard-import,redefined-builtin
|
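The try/except ImportError above leaves JWTAuth as None when the optional extra is missing, so callers need a guard before using it. A generic sketch of the same pattern with a clearer failure message; the module and extra names are illustrative:
# Optional-dependency pattern: import if available, keep a placeholder
# otherwise, and fail with a helpful message only when the feature is used.
try:
    import jwt  # supplied by an optional "[jwt]" extra in this sketch
except ImportError:
    jwt = None
def sign_token(payload, key):
    if jwt is None:
        raise RuntimeError("JWT support is not installed; install the [jwt] extra")
    return jwt.encode(payload, key, algorithm="HS256")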
334961054d875641d150eec4d6938f6f824ea655
|
_gcloud_vendor/__init__.py
|
_gcloud_vendor/__init__.py
|
"""Dependencies "vendored in", due to dependencies, Python versions, etc.
Current set
-----------
``apitools`` (pending release to PyPI, plus acceptable Python version
support for its dependencies). Review before M2.
"""
|
Add initializer for top-level '_gcloud_vendor' package.
|
Add initializer for top-level '_gcloud_vendor' package.
|
Python
|
apache-2.0
|
optimizely/gcloud-python,jgeewax/gcloud-python,lucemia/gcloud-python,tseaver/google-cloud-python,dhermes/google-cloud-python,elibixby/gcloud-python,tseaver/gcloud-python,daspecster/google-cloud-python,GrimDerp/gcloud-python,blowmage/gcloud-python,jonparrott/google-cloud-python,tseaver/google-cloud-python,GoogleCloudPlatform/gcloud-python,calpeyser/google-cloud-python,waprin/gcloud-python,VitalLabs/gcloud-python,daspecster/google-cloud-python,elibixby/gcloud-python,CyrusBiotechnology/gcloud-python,dhermes/gcloud-python,thesandlord/gcloud-python,blowmage/gcloud-python,waprin/google-cloud-python,lucemia/gcloud-python,tswast/google-cloud-python,VitalLabs/gcloud-python,thesandlord/gcloud-python,jonparrott/google-cloud-python,EugenePig/gcloud-python,tseaver/gcloud-python,tswast/google-cloud-python,googleapis/google-cloud-python,tartavull/google-cloud-python,jbuberel/gcloud-python,waprin/gcloud-python,dhermes/google-cloud-python,jbuberel/gcloud-python,Fkawala/gcloud-python,quom/google-cloud-python,jonparrott/gcloud-python,dhermes/google-cloud-python,dhermes/gcloud-python,EugenePig/gcloud-python,quom/google-cloud-python,waprin/google-cloud-python,GrimDerp/gcloud-python,Fkawala/gcloud-python,tseaver/google-cloud-python,vj-ug/gcloud-python,jonparrott/gcloud-python,jgeewax/gcloud-python,calpeyser/google-cloud-python,googleapis/google-cloud-python,CyrusBiotechnology/gcloud-python,optimizely/gcloud-python,tswast/google-cloud-python,optimizely/gcloud-python,GoogleCloudPlatform/gcloud-python,tartavull/google-cloud-python,vj-ug/gcloud-python
|
Add initializer for top-level '_gcloud_vendor' package.
|
"""Dependencies "vendored in", due to dependencies, Python versions, etc.
Current set
-----------
``apitools`` (pending release to PyPI, plus acceptable Python version
support for its dependencies). Review before M2.
"""
|
<commit_before><commit_msg>Add initializer for top-level '_gcloud_vendor' package.<commit_after>
|
"""Dependencies "vendored in", due to dependencies, Python versions, etc.
Current set
-----------
``apitools`` (pending release to PyPI, plus acceptable Python version
support for its dependencies). Review before M2.
"""
|
Add initializer for top-level '_gcloud_vendor' package."""Dependencies "vendored in", due to dependencies, Python versions, etc.
Current set
-----------
``apitools`` (pending release to PyPI, plus acceptable Python version
support for its dependencies). Review before M2.
"""
|
<commit_before><commit_msg>Add initializer for top-level '_gcloud_vendor' package.<commit_after>"""Dependencies "vendored in", due to dependencies, Python versions, etc.
Current set
-----------
``apitools`` (pending release to PyPI, plus acceptable Python version
support for its dependencies). Review before M2.
"""
|
|
0f9f4f1ee325d72d09625850ba6a153ae5616ab0
|
nose2/tests/functional/test_collect_plugin.py
|
nose2/tests/functional/test_collect_plugin.py
|
import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
|
import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only',
'--plugin=nose2.plugins.collect'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
|
Update test to load plugin
|
Update test to load plugin
collectonly no longer loaded by default
|
Python
|
bsd-2-clause
|
ptthiem/nose2,little-dude/nose2,little-dude/nose2,leth/nose2,ojengwa/nose2,ezigman/nose2,ojengwa/nose2,ezigman/nose2,leth/nose2,ptthiem/nose2
|
import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
Update test to load plugin
collectonly no longer loaded by default
|
import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only',
'--plugin=nose2.plugins.collect'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
|
<commit_before>import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
<commit_msg>Update test to load plugin
collectonly no longer loaded by default<commit_after>
|
import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only',
'--plugin=nose2.plugins.collect'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
|
import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
Update test to load plugin
collectonly no longer loaded by defaultimport re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only',
'--plugin=nose2.plugins.collect'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
|
<commit_before>import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
<commit_msg>Update test to load plugin
collectonly no longer loaded by default<commit_after>import re
from nose2.tests._common import FunctionalTestCase
class CollectOnlyFunctionalTest(FunctionalTestCase):
def test_collect_tests_in_package(self):
self.assertTestRunOutputMatches(
self.runIn('scenario/tests_in_package', '-v', '--collect-only',
'--plugin=nose2.plugins.collect'),
stderr=EXPECT_LAYOUT1)
# expectations
EXPECT_LAYOUT1 = re.compile("""\
Ran 25 tests in \d.\d+s
OK""")
|
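The fix above passes --plugin on the command line because the collect plugin is no longer loaded by default; the same flag works outside the functional-test harness (a plugins entry in the [unittest] section of nose2.cfg is the usual file-based alternative). A sketch of the equivalent direct invocation, assuming nose2 is installed and the scenario package is the working directory:
# Sketch: run nose2 with the collect plugin enabled, mirroring the arguments
# the functional test passes to runIn(); assumes nose2 is on PATH.
import subprocess
subprocess.run(
    ["nose2", "-v", "--collect-only", "--plugin=nose2.plugins.collect"],
    check=True)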
dc3ee951363116b235ec96bef34b06a661fc4795
|
examples/fail_if_old_driver_test.py
|
examples/fail_if_old_driver_test.py
|
from seleniumbase import BaseCase
class ChromedriverTests(BaseCase):
def test_fail_if_using_an_old_chromedriver(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
chrome_version = self.get_chrome_version()
major_chrome_version = chrome_version.split('.')[0]
chromedriver_version = self.get_chromedriver_version()
major_chromedriver_version = chromedriver_version.split('.')[0]
install_sb = "sbase install chromedriver %s" % major_chrome_version
if (
int(major_chromedriver_version) < 73
and int(major_chrome_version) >= 73
):
message = (
'Your version of chromedriver: "%s"\n '
'is too old for your version of Chrome: "%s"\n'
'You should upgrade chromedriver '
"to receive important bug fixes!\n"
'Run this command to upgrade: "%s"'
% (chromedriver_version, chrome_version, install_sb)
)
raise Exception(message) # chromedriver is out-of-date
|
Add a test that fails if using an old version of chromedriver
|
Add a test that fails if using an old version of chromedriver
|
Python
|
mit
|
mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
|
Add a test that fails if using an old version of chromedriver
|
from seleniumbase import BaseCase
class ChromedriverTests(BaseCase):
def test_fail_if_using_an_old_chromedriver(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
chrome_version = self.get_chrome_version()
major_chrome_version = chrome_version.split('.')[0]
chromedriver_version = self.get_chromedriver_version()
major_chromedriver_version = chromedriver_version.split('.')[0]
install_sb = "sbase install chromedriver %s" % major_chrome_version
if (
int(major_chromedriver_version) < 73
and int(major_chrome_version) >= 73
):
message = (
'Your version of chromedriver: "%s"\n '
'is too old for your version of Chrome: "%s"\n'
'You should upgrade chromedriver '
"to receive important bug fixes!\n"
'Run this command to upgrade: "%s"'
% (chromedriver_version, chrome_version, install_sb)
)
raise Exception(message) # chromedriver is out-of-date
|
<commit_before><commit_msg>Add a test that fails if using an old version of chromedriver<commit_after>
|
from seleniumbase import BaseCase
class ChromedriverTests(BaseCase):
def test_fail_if_using_an_old_chromedriver(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
chrome_version = self.get_chrome_version()
major_chrome_version = chrome_version.split('.')[0]
chromedriver_version = self.get_chromedriver_version()
major_chromedriver_version = chromedriver_version.split('.')[0]
install_sb = "sbase install chromedriver %s" % major_chrome_version
if (
int(major_chromedriver_version) < 73
and int(major_chrome_version) >= 73
):
message = (
'Your version of chromedriver: "%s"\n '
'is too old for your version of Chrome: "%s"\n'
'You should upgrade chromedriver '
"to receive important bug fixes!\n"
'Run this command to upgrade: "%s"'
% (chromedriver_version, chrome_version, install_sb)
)
raise Exception(message) # chromedriver is out-of-date
|
Add a test that fails if using an old version of chromedriverfrom seleniumbase import BaseCase
class ChromedriverTests(BaseCase):
def test_fail_if_using_an_old_chromedriver(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
chrome_version = self.get_chrome_version()
major_chrome_version = chrome_version.split('.')[0]
chromedriver_version = self.get_chromedriver_version()
major_chromedriver_version = chromedriver_version.split('.')[0]
install_sb = "sbase install chromedriver %s" % major_chrome_version
if (
int(major_chromedriver_version) < 73
and int(major_chrome_version) >= 73
):
message = (
'Your version of chromedriver: "%s"\n '
'is too old for your version of Chrome: "%s"\n'
'You should upgrade chromedriver '
"to receive important bug fixes!\n"
'Run this command to upgrade: "%s"'
% (chromedriver_version, chrome_version, install_sb)
)
raise Exception(message) # chromedriver is out-of-date
|
<commit_before><commit_msg>Add a test that fails if using an old version of chromedriver<commit_after>from seleniumbase import BaseCase
class ChromedriverTests(BaseCase):
def test_fail_if_using_an_old_chromedriver(self):
if self.browser != "chrome":
print("\n This test is only for Chrome!")
print(" (Run with: '--browser=chrome')")
self.skip("This test is only for Chrome!")
chrome_version = self.get_chrome_version()
major_chrome_version = chrome_version.split('.')[0]
chromedriver_version = self.get_chromedriver_version()
major_chromedriver_version = chromedriver_version.split('.')[0]
install_sb = "sbase install chromedriver %s" % major_chrome_version
if (
int(major_chromedriver_version) < 73
and int(major_chrome_version) >= 73
):
message = (
'Your version of chromedriver: "%s"\n '
'is too old for your version of Chrome: "%s"\n'
'You should upgrade chromedriver '
"to receive important bug fixes!\n"
'Run this command to upgrade: "%s"'
% (chromedriver_version, chrome_version, install_sb)
)
raise Exception(message) # chromedriver is out-of-date
|
|
30f704c3e767462fefb5086bbf6b5f190cec7c1b
|
search/fibonacci_search/python/fibonacci_search.py
|
search/fibonacci_search/python/fibonacci_search.py
|
#Fibonacci search for sorted algorithm
def fibSearch(arr,x):
#fibonacci numbers initialization
fib2 = 0
fib1 = 1
fib = fib2 + fib1
n = len(arr)
#find the smallest fibonacci greater than or equal to array length
while (fib < n):
fib2 = fib1
fib1 = fib
fib = fib2 + fib1
#the leftout list for the array
offset = -1
while (fib > 1):
#check if fib2 is a valid index
i = min(offset+fib2, n-1)
#if x is greater than the value at index fib2,
#cut the array from offset to i
if (arr[i] < x):
fib = fib1
fib1 = fib2
fib2 = fib - fib1
offset = i
#if x is smaller than the value at index fib2,
#cut the array after i+1
elif (arr[i] > x):
fib = fib2
fib1 = fib1 - fib2
fib2 = fib - fib1
#return i when element is found
else :
return i
#compare last element with x
if (fib1 and arr[offset+1] == x):
return offset+1
#when element not found return -1
return -1
arr = [10, 22, 35, 40, 45, 50, 80, 82, 85, 90, 100]
x = 85
print ("Found at index: ", fibSearch(arr, x))
|
Add fibonacci search in python
|
Add fibonacci search in python
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
|
Add fibonacci search in python
|
#Fibonacci search for sorted algorithm
def fibSearch(arr,x):
#fibonacci numbers initialization
fib2 = 0
fib1 = 1
fib = fib2 + fib1
n = len(arr)
#find the smallest fibonacci greater than or equal to array length
while (fib < n):
fib2 = fib1
fib1 = fib
fib = fib2 + fib1
#the leftout list for the array
offset = -1
while (fib > 1):
#check if fib2 is a valid index
i = min(offset+fib2, n-1)
#if x is greater than the value at index fib2,
#cut the array from offset to i
if (arr[i] < x):
fib = fib1
fib1 = fib2
fib2 = fib - fib1
offset = i
#if x is smaller than the value at index fib2,
#cut the array after i+1
elif (arr[i] > x):
fib = fib2
fib1 = fib1 - fib2
fib2 = fib - fib1
#return i when element is found
else :
return i
#compare last element with x
if (fib1 and arr[offset+1] == x):
return offset+1
#when element not found return -1
return -1
arr = [10, 22, 35, 40, 45, 50, 80, 82, 85, 90, 100]
x = 85
print ("Found at index: ", fibSearch(arr, x))
|
<commit_before><commit_msg>Add fibonacci search in python<commit_after>
|
#Fibonacci search for sorted algorithm
def fibSearch(arr,x):
#fibonacci numbers initialization
fib2 = 0
fib1 = 1
fib = fib2 + fib1
n = len(arr)
#find the smallest fibonacci greater than or equal to array length
while (fib < n):
fib2 = fib1
fib1 = fib
fib = fib2 + fib1
#the leftout list for the array
offset = -1
while (fib > 1):
#check if fib2 is a valid index
i = min(offset+fib2, n-1)
#if x is greater than the value at index fib2,
#cut the array from offset to i
if (arr[i] < x):
fib = fib1
fib1 = fib2
fib2 = fib - fib1
offset = i
#if x is smaller than the value at index fib2,
#cut the array after i+1
elif (arr[i] > x):
fib = fib2
fib1 = fib1 - fib2
fib2 = fib - fib1
#return i when element is found
else :
return i
#compare last element with x
if (fib1 and arr[offset+1] == x):
return offset+1
#when element not found return -1
return -1
arr = [10, 22, 35, 40, 45, 50, 80, 82, 85, 90, 100]
x = 85
print ("Found at index: ", fibSearch(arr, x))
|
Add fibonacci search in python#Fibonacci search for sorted algorithm
def fibSearch(arr,x):
#fibonacci numbers initialization
fib2 = 0
fib1 = 1
fib = fib2 + fib1
n = len(arr)
#find the smallest fibonacci greater than or equal to array length
while (fib < n):
fib2 = fib1
fib1 = fib
fib = fib2 + fib1
#the leftout list for the array
offset = -1
while (fib > 1):
#check if fib2 is a valid index
i = min(offset+fib2, n-1)
#if x is greater than the value at index fib2,
#cut the array from offset to i
if (arr[i] < x):
fib = fib1
fib1 = fib2
fib2 = fib - fib1
offset = i
#if x is smaller than the value at index fib2,
#cut the array after i+1
elif (arr[i] > x):
fib = fib2
fib1 = fib1 - fib2
fib2 = fib - fib1
#return i when element is found
else :
return i
#compare last element with x
if (fib1 and arr[offset+1] == x):
return offset+1
#when element not found return -1
return -1
arr = [10, 22, 35, 40, 45, 50, 80, 82, 85, 90, 100]
x = 85
print ("Found at index: ", fibSearch(arr, x))
|
<commit_before><commit_msg>Add fibonacci search in python<commit_after>#Fibonacci search for sorted algorithm
def fibSearch(arr,x):
#fibonacci numbers initialization
fib2 = 0
fib1 = 1
fib = fib2 + fib1
n = len(arr)
#find the smallest fibonacci greater than or equal to array length
while (fib < n):
fib2 = fib1
fib1 = fib
fib = fib2 + fib1
#the leftout list for the array
offset = -1
while (fib > 1):
#check if fib2 is a valid index
i = min(offset+fib2, n-1)
#if x is greater than the value at index fib2,
#cut the array from offset to i
if (arr[i] < x):
fib = fib1
fib1 = fib2
fib2 = fib - fib1
offset = i
#if x is smaller than the value at index fib2,
#cut the array after i+1
elif (arr[i] > x):
fib = fib2
fib1 = fib1 - fib2
fib2 = fib - fib1
#return i when element is found
else :
return i
#compare last element with x
if (fib1 and arr[offset+1] == x):
return offset+1
#when element not found return -1
return -1
arr = [10, 22, 35, 40, 45, 50, 80, 82, 85, 90, 100]
x = 85
print ("Found at index: ", fibSearch(arr, x))
|
|
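A quick way to sanity-check the search above is to compare it with bisect on the same sorted data. This sketch assumes the code above is importable as a module named fibonacci_search:
# Cross-check: for values present in the list, fibSearch should agree with
# bisect_left (the test data has no duplicates), and absent values return -1.
from bisect import bisect_left
from fibonacci_search import fibSearch  # assumption: the module above is on the path
arr = [10, 22, 35, 40, 45, 50, 80, 82, 85, 90, 100]
for value in arr:
    assert fibSearch(arr, value) == bisect_left(arr, value)
assert fibSearch(arr, 33) == -1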
54eca489024d3d8f354a44d161797edb8e916600
|
tests/test_saw.py
|
tests/test_saw.py
|
import unittest
from saw.saw import Saw
class Test_Saw(unittest.TestCase):
def setUp(self):
text = "Starting right this second, it's way easier to merge Pull Requests! \
We usually merge them from the comfortable glow of our computers, but with the\
new mobile site we're comfortable merging smaller Pull Requests while sitting\
on the hyperloop (or while on the bus, I guess)."
self.obj = Saw().load(text)
def test_saw(self):
self.assertEqual(self.obj.paragraphs[0].sentences[0].blocks, self.obj.blocks)
if __name__ == "__main__":
unittest.main()
|
Add tests - now very simple and primitive
|
Add tests - now very simple and primitive
|
Python
|
mit
|
diNard/Saw
|
Add tests - now very simple and primitive
|
import unittest
from saw.saw import Saw
class Test_Saw(unittest.TestCase):
def setUp(self):
text = "Starting right this second, it's way easier to merge Pull Requests! \
We usually merge them from the comfortable glow of our computers, but with the\
new mobile site we're comfortable merging smaller Pull Requests while sitting\
on the hyperloop (or while on the bus, I guess)."
self.obj = Saw().load(text)
def test_saw(self):
self.assertEqual(self.obj.paragraphs[0].sentences[0].blocks, self.obj.blocks)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add tests - now very simple and primitive<commit_after>
|
import unittest
from saw.saw import Saw
class Test_Saw(unittest.TestCase):
def setUp(self):
text = "Starting right this second, it's way easier to merge Pull Requests! \
We usually merge them from the comfortable glow of our computers, but with the\
new mobile site we're comfortable merging smaller Pull Requests while sitting\
on the hyperloop (or while on the bus, I guess)."
self.obj = Saw().load(text)
def test_saw(self):
self.assertEqual(self.obj.paragraphs[0].sentences[0].blocks, self.obj.blocks)
if __name__ == "__main__":
unittest.main()
|
Add tests - now very simple and primitiveimport unittest
from saw.saw import Saw
class Test_Saw(unittest.TestCase):
def setUp(self):
text = "Starting right this second, it's way easier to merge Pull Requests! \
We usually merge them from the comfortable glow of our computers, but with the\
new mobile site we're comfortable merging smaller Pull Requests while sitting\
on the hyperloop (or while on the bus, I guess)."
self.obj = Saw().load(text)
def test_saw(self):
self.assertEqual(self.obj.paragraphs[0].sentences[0].blocks, self.obj.blocks)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add tests - now very simple and primitive<commit_after>import unittest
from saw.saw import Saw
class Test_Saw(unittest.TestCase):
def setUp(self):
text = "Starting right this second, it's way easier to merge Pull Requests! \
We usually merge them from the comfortable glow of our computers, but with the\
new mobile site we're comfortable merging smaller Pull Requests while sitting\
on the hyperloop (or while on the bus, I guess)."
self.obj = Saw().load(text)
def test_saw(self):
self.assertEqual(self.obj.paragraphs[0].sentences[0].blocks, self.obj.blocks)
if __name__ == "__main__":
unittest.main()
|
|
1bd21c7b35a100e0f72f03bd9e0d783dc136c41e
|
cla_backend/apps/cla_butler/management/commands/monitor_multiple_outcome_codes.py
|
cla_backend/apps/cla_butler/management/commands/monitor_multiple_outcome_codes.py
|
# coding=utf-8
import logging
from django.core.management.base import BaseCommand
from django.db.models import Count, Max, Min
from django.utils.timezone import now
from cla_butler.stack import is_first_instance, InstanceNotInAsgException, StackException
from cla_eventlog.models import Log
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'LGA-294 specific monitoring command. Alert when multiple outcome codes ' \
'that should only occur once are found for today (since 00:00)'
def handle(self, *args, **options):
if self.should_run_housekeeping(**options):
self.check_for_multiple_outcome_codes()
else:
logger.debug('LGA-294 Skip check_for_multiple_outcome_codes: running on secondary instance')
@staticmethod
def check_for_multiple_outcome_codes():
# Outcome codes defined to appear only once on a case:
# https://docs.google.com/spreadsheets/d/1hN64bA_H2a_0eC_5-k-0IY2-RKbCor2VGflp1ykQGa0/
start_of_today = now().replace(hour=0, minute=0, second=0, microsecond=0)
once_only_codes = ['PCB', 'COPE', 'DUPL', 'MRNB', 'NCOE', 'DESP', 'DECL', 'MRCC', 'NRES', 'CPTA',
'COSPF', 'SPFM', 'SPFN', 'DREFER', 'COI', 'CLSP', 'MANALC', 'MANREF', 'MIS',
'MIS-MEANS', 'MIS-OOS', 'REF-EXT', 'REF-INT', 'REFSP', 'REOPEN', 'SPOR', 'WROF']
once_only_events_today = Log.objects.filter(created__gte=start_of_today, code__in=once_only_codes)
once_only_codes_today = once_only_events_today.only('case__reference', 'code', 'created')
once_only_codes_today_counts = once_only_codes_today.values('case__reference', 'code') \
.annotate(total=Count('code'), earliest=Min('created'), latest=Max('created'))
multiple_codes_today = once_only_codes_today_counts.filter(total__gt=1).order_by('-total')
if multiple_codes_today.exists():
for i in multiple_codes_today:
logger.warning('LGA-294 investigation. Multiple outcome codes today for case: {}'.format(i))
else:
logger.info('LGA-294 No multiple outcome codes found for today')
@staticmethod
def should_run_housekeeping(**options):
if options.get('force', False):
return True
try:
return is_first_instance()
except InstanceNotInAsgException:
logger.info('EC2 instance not in an ASG')
return True
except StackException:
logger.info('Not running on EC2 instance')
return True
|
Check for multiple outcome codes occurring today
|
Check for multiple outcome codes occurring today
|
Python
|
mit
|
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
|
Check for multiple outcome codes occurring today
|
# coding=utf-8
import logging
from django.core.management.base import BaseCommand
from django.db.models import Count, Max, Min
from django.utils.timezone import now
from cla_butler.stack import is_first_instance, InstanceNotInAsgException, StackException
from cla_eventlog.models import Log
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'LGA-294 specific monitoring command. Alert when multiple outcome codes ' \
'that should only occur once are found for today (since 00:00)'
def handle(self, *args, **options):
if self.should_run_housekeeping(**options):
self.check_for_multiple_outcome_codes()
else:
logger.debug('LGA-294 Skip check_for_multiple_outcome_codes: running on secondary instance')
@staticmethod
def check_for_multiple_outcome_codes():
# Outcome codes defined to appear only once on a case:
# https://docs.google.com/spreadsheets/d/1hN64bA_H2a_0eC_5-k-0IY2-RKbCor2VGflp1ykQGa0/
start_of_today = now().replace(hour=0, minute=0, second=0, microsecond=0)
once_only_codes = ['PCB', 'COPE', 'DUPL', 'MRNB', 'NCOE', 'DESP', 'DECL', 'MRCC', 'NRES', 'CPTA',
'COSPF', 'SPFM', 'SPFN', 'DREFER', 'COI', 'CLSP', 'MANALC', 'MANREF', 'MIS',
'MIS-MEANS', 'MIS-OOS', 'REF-EXT', 'REF-INT', 'REFSP', 'REOPEN', 'SPOR', 'WROF']
once_only_events_today = Log.objects.filter(created__gte=start_of_today, code__in=once_only_codes)
once_only_codes_today = once_only_events_today.only('case__reference', 'code', 'created')
once_only_codes_today_counts = once_only_codes_today.values('case__reference', 'code') \
.annotate(total=Count('code'), earliest=Min('created'), latest=Max('created'))
multiple_codes_today = once_only_codes_today_counts.filter(total__gt=1).order_by('-total')
if multiple_codes_today.exists():
for i in multiple_codes_today:
logger.warning('LGA-294 investigation. Multiple outcome codes today for case: {}'.format(i))
else:
logger.info('LGA-294 No multiple outcome codes found for today')
@staticmethod
def should_run_housekeeping(**options):
if options.get('force', False):
return True
try:
return is_first_instance()
except InstanceNotInAsgException:
logger.info('EC2 instance not in an ASG')
return True
except StackException:
logger.info('Not running on EC2 instance')
return True
|
<commit_before><commit_msg>Check for multiple outcome codes occurring today<commit_after>
|
# coding=utf-8
import logging
from django.core.management.base import BaseCommand
from django.db.models import Count, Max, Min
from django.utils.timezone import now
from cla_butler.stack import is_first_instance, InstanceNotInAsgException, StackException
from cla_eventlog.models import Log
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'LGA-294 specific monitoring command. Alert when multiple outcome codes ' \
'that should only occur once are found for today (since 00:00)'
def handle(self, *args, **options):
if self.should_run_housekeeping(**options):
self.check_for_multiple_outcome_codes()
else:
logger.debug('LGA-294 Skip check_for_multiple_outcome_codes: running on secondary instance')
@staticmethod
def check_for_multiple_outcome_codes():
# Outcome codes defined to appear only once on a case:
# https://docs.google.com/spreadsheets/d/1hN64bA_H2a_0eC_5-k-0IY2-RKbCor2VGflp1ykQGa0/
start_of_today = now().replace(hour=0, minute=0, second=0, microsecond=0)
once_only_codes = ['PCB', 'COPE', 'DUPL', 'MRNB', 'NCOE', 'DESP', 'DECL', 'MRCC', 'NRES', 'CPTA',
'COSPF', 'SPFM', 'SPFN', 'DREFER', 'COI', 'CLSP', 'MANALC', 'MANREF', 'MIS',
'MIS-MEANS', 'MIS-OOS', 'REF-EXT', 'REF-INT', 'REFSP', 'REOPEN', 'SPOR', 'WROF']
once_only_events_today = Log.objects.filter(created__gte=start_of_today, code__in=once_only_codes)
once_only_codes_today = once_only_events_today.only('case__reference', 'code', 'created')
once_only_codes_today_counts = once_only_codes_today.values('case__reference', 'code') \
.annotate(total=Count('code'), earliest=Min('created'), latest=Max('created'))
multiple_codes_today = once_only_codes_today_counts.filter(total__gt=1).order_by('-total')
if multiple_codes_today.exists():
for i in multiple_codes_today:
logger.warning('LGA-294 investigation. Multiple outcome codes today for case: {}'.format(i))
else:
logger.info('LGA-294 No multiple outcome codes found for today')
@staticmethod
def should_run_housekeeping(**options):
if options.get('force', False):
return True
try:
return is_first_instance()
except InstanceNotInAsgException:
logger.info('EC2 instance not in an ASG')
return True
except StackException:
logger.info('Not running on EC2 instance')
return True
|
Check for multiple outcome codes occurring today# coding=utf-8
import logging
from django.core.management.base import BaseCommand
from django.db.models import Count, Max, Min
from django.utils.timezone import now
from cla_butler.stack import is_first_instance, InstanceNotInAsgException, StackException
from cla_eventlog.models import Log
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'LGA-294 specific monitoring command. Alert when multiple outcome codes ' \
'that should only occur once are found for today (since 00:00)'
def handle(self, *args, **options):
if self.should_run_housekeeping(**options):
self.check_for_multiple_outcome_codes()
else:
logger.debug('LGA-294 Skip check_for_multiple_outcome_codes: running on secondary instance')
@staticmethod
def check_for_multiple_outcome_codes():
# Outcome codes defined to appear only once on a case:
# https://docs.google.com/spreadsheets/d/1hN64bA_H2a_0eC_5-k-0IY2-RKbCor2VGflp1ykQGa0/
start_of_today = now().replace(hour=0, minute=0, second=0, microsecond=0)
once_only_codes = ['PCB', 'COPE', 'DUPL', 'MRNB', 'NCOE', 'DESP', 'DECL', 'MRCC', 'NRES', 'CPTA',
'COSPF', 'SPFM', 'SPFN', 'DREFER', 'COI', 'CLSP', 'MANALC', 'MANREF', 'MIS',
'MIS-MEANS', 'MIS-OOS', 'REF-EXT', 'REF-INT', 'REFSP', 'REOPEN', 'SPOR', 'WROF']
once_only_events_today = Log.objects.filter(created__gte=start_of_today, code__in=once_only_codes)
once_only_codes_today = once_only_events_today.only('case__reference', 'code', 'created')
once_only_codes_today_counts = once_only_codes_today.values('case__reference', 'code') \
.annotate(total=Count('code'), earliest=Min('created'), latest=Max('created'))
multiple_codes_today = once_only_codes_today_counts.filter(total__gt=1).order_by('-total')
if multiple_codes_today.exists():
for i in multiple_codes_today:
logger.warning('LGA-294 investigation. Multiple outcome codes today for case: {}'.format(i))
else:
logger.info('LGA-294 No multiple outcome codes found for today')
@staticmethod
def should_run_housekeeping(**options):
if options.get('force', False):
return True
try:
return is_first_instance()
except InstanceNotInAsgException:
logger.info('EC2 instance not in an ASG')
return True
except StackException:
logger.info('Not running on EC2 instance')
return True
|
<commit_before><commit_msg>Check for multiple outcome codes occurring today<commit_after># coding=utf-8
import logging
from django.core.management.base import BaseCommand
from django.db.models import Count, Max, Min
from django.utils.timezone import now
from cla_butler.stack import is_first_instance, InstanceNotInAsgException, StackException
from cla_eventlog.models import Log
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'LGA-294 specific monitoring command. Alert when multiple outcome codes ' \
'that should only occur once are found for today (since 00:00)'
def handle(self, *args, **options):
if self.should_run_housekeeping(**options):
self.check_for_multiple_outcome_codes()
else:
logger.debug('LGA-294 Skip check_for_multiple_outcome_codes: running on secondary instance')
@staticmethod
def check_for_multiple_outcome_codes():
# Outcome codes defined to appear only once on a case:
# https://docs.google.com/spreadsheets/d/1hN64bA_H2a_0eC_5-k-0IY2-RKbCor2VGflp1ykQGa0/
start_of_today = now().replace(hour=0, minute=0, second=0, microsecond=0)
once_only_codes = ['PCB', 'COPE', 'DUPL', 'MRNB', 'NCOE', 'DESP', 'DECL', 'MRCC', 'NRES', 'CPTA',
'COSPF', 'SPFM', 'SPFN', 'DREFER', 'COI', 'CLSP', 'MANALC', 'MANREF', 'MIS',
'MIS-MEANS', 'MIS-OOS', 'REF-EXT', 'REF-INT', 'REFSP', 'REOPEN', 'SPOR', 'WROF']
once_only_events_today = Log.objects.filter(created__gte=start_of_today, code__in=once_only_codes)
once_only_codes_today = once_only_events_today.only('case__reference', 'code', 'created')
once_only_codes_today_counts = once_only_codes_today.values('case__reference', 'code') \
.annotate(total=Count('code'), earliest=Min('created'), latest=Max('created'))
multiple_codes_today = once_only_codes_today_counts.filter(total__gt=1).order_by('-total')
if multiple_codes_today.exists():
for i in multiple_codes_today:
logger.warning('LGA-294 investigation. Multiple outcome codes today for case: {}'.format(i))
else:
logger.info('LGA-294 No multiple outcome codes found for today')
@staticmethod
def should_run_housekeeping(**options):
if options.get('force', False):
return True
try:
return is_first_instance()
except InstanceNotInAsgException:
logger.info('EC2 instance not in an ASG')
return True
except StackException:
logger.info('Not running on EC2 instance')
return True
|
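Aside: the duplicate detection in the record above boils down to counting (case, code) pairs and flagging any pair seen more than once. A minimal stand-alone sketch of that grouping, using invented case references and codes rather than real Log rows:

from collections import Counter

# Hypothetical events for today as (case_reference, outcome_code) pairs.
events = [("AB-1234", "WROF"), ("AB-1234", "WROF"), ("CD-5678", "SPOR")]

counts = Counter(events)  # mirrors .values(...).annotate(total=Count('code'))
for (case_reference, code), total in counts.items():
    if total > 1:
        print("Multiple outcome codes today: case=%s code=%s total=%s" % (case_reference, code, total))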
|
ff151c8ea04268d2060cf8d281294a0d500ecbba
|
tests/integration/resilience_test.py
|
tests/integration/resilience_test.py
|
from __future__ import unicode_literals
from __future__ import absolute_import
import mock
from compose.project import Project
from .testcases import DockerClientTestCase
class ResilienceTest(DockerClientTestCase):
def test_recreate_fails(self):
db = self.create_service('db', volumes=['/var/db'], command='top')
project = Project('composetest', [db], self.client)
container = db.create_container()
db.start_container(container)
host_path = container.get('Volumes')['/var/db']
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
with mock.patch('compose.service.Service.create_container', crash):
with self.assertRaises(Crash):
project.up()
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
class Crash(Exception):
pass
def crash(*args, **kwargs):
raise Crash()
|
Test that data volumes now survive a crash when recreating
|
Test that data volumes now survive a crash when recreating
Signed-off-by: Aanand Prasad <94fc4f3e3d0be608b3ed7b8529ff28d2a445cce1@gmail.com>
|
Python
|
apache-2.0
|
marcusmartins/compose,alexandrev/compose,alunduil/fig,sanscontext/docker.github.io,dopry/compose,aanand/fig,tangkun75/compose,danix800/docker.github.io,cclauss/compose,tiry/compose,joaofnfernandes/docker.github.io,Yelp/docker-compose,BSWANG/denverdino.github.io,twitherspoon/compose,alunduil/fig,rillig/docker.github.io,mosquito/docker-compose,sanscontext/docker.github.io,dnephin/compose,twitherspoon/compose,ZJaffee/compose,zhangspook/compose,cgvarela/compose,docker-zh/docker.github.io,mindaugasrukas/compose,shubheksha/docker.github.io,charleswhchan/compose,mark-adams/compose,mnowster/compose,ph-One/compose,rillig/docker.github.io,Chouser/compose,ChrisChinchilla/compose,johnstep/docker.github.io,MSakamaki/compose,browning/compose,aanand/fig,alexisbellido/docker.github.io,j-fuentes/compose,artemkaint/compose,jorgeLuizChaves/compose,lukemarsden/compose,phiroict/docker,rgbkrk/compose,menglingwei/denverdino.github.io,Katlean/fig,anweiss/docker.github.io,feelobot/compose,vlajos/compose,denverdino/denverdino.github.io,menglingwei/denverdino.github.io,joaofnfernandes/docker.github.io,mohitsoni/compose,runcom/compose,londoncalling/docker.github.io,alexisbellido/docker.github.io,gdevillele/docker.github.io,bfirsh/fig,LuisBosquez/docker.github.io,screwgoth/compose,anweiss/docker.github.io,ggtools/compose,denverdino/denverdino.github.io,swoopla/compose,Yelp/docker-compose,josephpage/compose,philwrenn/compose,michael-k/docker-compose,docker/docker.github.io,KalleDK/compose,genki/compose,vdemeester/compose,rillig/docker.github.io,gdevillele/docker.github.io,mbailey/compose,JimGalasyn/docker.github.io,mnuessler/compose,joeuo/docker.github.io,lmesz/compose,RobertNorthard/compose,danix800/docker.github.io,docker/docker.github.io,thaJeztah/docker.github.io,shin-/docker.github.io,denverdino/docker.github.io,bdwill/docker.github.io,danix800/docker.github.io,denverdino/docker.github.io,LuisBosquez/docker.github.io,d2bit/compose,xydinesh/compose,hoogenm/compose,amitsaha/compose,sdurrheimer/compose,DoubleMalt/compose,iamluc/compose,ekristen/compose,shubheksha/docker.github.io,dnephin/compose,mark-adams/compose,shin-/docker.github.io,josephpage/compose,joeuo/docker.github.io,alexandrev/compose,jonaseck2/compose,d2bit/compose,kikkomep/compose,jonaseck2/compose,shin-/compose,benhamill/compose,jzwlqx/denverdino.github.io,londoncalling/docker.github.io,joeuo/docker.github.io,denverdino/denverdino.github.io,albers/compose,talolard/compose,menglingwei/denverdino.github.io,sdurrheimer/compose,schmunk42/compose,funkyfuture/docker-compose,johnstep/docker.github.io,troy0820/docker.github.io,ChrisChinchilla/compose,sanscontext/docker.github.io,mnuessler/compose,DoubleMalt/compose,simonista/compose,BSWANG/denverdino.github.io,johnstep/docker.github.io,prologic/compose,bdwill/docker.github.io,mdaue/compose,dbdd4us/compose,joeuo/docker.github.io,benhamill/compose,feelobot/compose,jzwlqx/denverdino.github.io,bobphill/compose,johnstep/docker.github.io,kojiromike/compose,noironetworks/compose,Chouser/compose,andrewgee/compose,JimGalasyn/docker.github.io,phiroict/docker,xydinesh/compose,jrabbit/compose,TomasTomecek/compose,saada/compose,VinceBarresi/compose,JimGalasyn/docker.github.io,mohitsoni/compose,gdevillele/docker.github.io,zhangspook/compose,vlajos/compose,lmesz/compose,iamluc/compose,viranch/compose,nhumrich/compose,mrfuxi/compose,rillig/docker.github.io,runcom/compose,artemkaint/compose,mrfuxi/compose,dockerhn/compose,gdevillele/docker.github.io,brunocascio/compose,ionrock/compose,docker-zh/docker.github.io,RobertNorthard/c
ompose,bfirsh/fig,Katlean/fig,joaofnfernandes/docker.github.io,jgrowl/compose,tiry/compose,rstacruz/compose,thaJeztah/compose,shubheksha/docker.github.io,londoncalling/docker.github.io,dbdd4us/compose,sanscontext/docker.github.io,mchasal/compose,mbailey/compose,sanscontext/docker.github.io,brunocascio/compose,philwrenn/compose,TomasTomecek/compose,kojiromike/compose,londoncalling/docker.github.io,alexisbellido/docker.github.io,JimGalasyn/docker.github.io,aduermael/docker.github.io,bobphill/compose,denverdino/docker.github.io,aduermael/docker.github.io,thaJeztah/docker.github.io,anweiss/docker.github.io,jzwlqx/denverdino.github.io,moxiegirl/compose,uvgroovy/compose,bbirand/compose,troy0820/docker.github.io,troy0820/docker.github.io,docker-zh/docker.github.io,jzwlqx/denverdino.github.io,michael-k/docker-compose,KevinGreene/compose,docker-zh/docker.github.io,goloveychuk/compose,qzio/compose,ralphtheninja/compose,ralphtheninja/compose,denverdino/docker.github.io,tangkun75/compose,KalleDK/compose,shubheksha/docker.github.io,thaJeztah/compose,cclauss/compose,danix800/docker.github.io,joeuo/docker.github.io,menglingwei/denverdino.github.io,ph-One/compose,GM-Alex/compose,docker/docker.github.io,browning/compose,denverdino/compose,anweiss/docker.github.io,unodba/compose,bdwill/docker.github.io,unodba/compose,albers/compose,anweiss/docker.github.io,aduermael/docker.github.io,BSWANG/denverdino.github.io,nhumrich/compose,simonista/compose,dockerhn/compose,LuisBosquez/docker.github.io,genki/compose,uvgroovy/compose,shubheksha/docker.github.io,jorgeLuizChaves/compose,BSWANG/denverdino.github.io,jrabbit/compose,rstacruz/compose,pspierce/compose,ekristen/compose,goloveychuk/compose,phiroict/docker,docker-zh/docker.github.io,alexisbellido/docker.github.io,phiroict/docker,Dakno/compose,denverdino/compose,denverdino/denverdino.github.io,swoopla/compose,MSakamaki/compose,au-phiware/compose,dopry/compose,JimGalasyn/docker.github.io,andrewgee/compose,KevinGreene/compose,BSWANG/denverdino.github.io,ggtools/compose,ionrock/compose,kikkomep/compose,ZJaffee/compose,bbirand/compose,alexisbellido/docker.github.io,screwgoth/compose,bsmr-docker/compose,amitsaha/compose,joaofnfernandes/docker.github.io,LuisBosquez/docker.github.io,mosquito/docker-compose,qzio/compose,denverdino/docker.github.io,TheDataShed/compose,troy0820/docker.github.io,talolard/compose,schmunk42/compose,jeanpralo/compose,shin-/compose,j-fuentes/compose,TheDataShed/compose,cgvarela/compose,jiekechoo/compose,pspierce/compose,moxiegirl/compose,au-phiware/compose,dilgerma/compose,VinceBarresi/compose,shin-/docker.github.io,denverdino/denverdino.github.io,gtrdotmcs/compose,thaJeztah/docker.github.io,rgbkrk/compose,noironetworks/compose,thaJeztah/docker.github.io,aduermael/docker.github.io,mindaugasrukas/compose,jessekl/compose,joaofnfernandes/docker.github.io,menglingwei/denverdino.github.io,gdevillele/docker.github.io,bdwill/docker.github.io,londoncalling/docker.github.io,bdwill/docker.github.io,mdaue/compose,saada/compose,mchasal/compose,funkyfuture/docker-compose,docker/docker.github.io,shin-/docker.github.io,lukemarsden/compose,hoogenm/compose,prologic/compose,Dakno/compose,jzwlqx/denverdino.github.io,jgrowl/compose,charleswhchan/compose,dilgerma/compose,jiekechoo/compose,viranch/compose,johnstep/docker.github.io,mnowster/compose,vdemeester/compose,gtrdotmcs/compose,docker/docker.github.io,phiroict/docker,thaJeztah/docker.github.io,shin-/docker.github.io,LuisBosquez/docker.github.io,jeanpralo/compose,jessekl/compose,marcusmartins/compose,GM-Alex/compo
se,bsmr-docker/compose
|
Test that data volumes now survive a crash when recreating
Signed-off-by: Aanand Prasad <94fc4f3e3d0be608b3ed7b8529ff28d2a445cce1@gmail.com>
|
from __future__ import unicode_literals
from __future__ import absolute_import
import mock
from compose.project import Project
from .testcases import DockerClientTestCase
class ResilienceTest(DockerClientTestCase):
def test_recreate_fails(self):
db = self.create_service('db', volumes=['/var/db'], command='top')
project = Project('composetest', [db], self.client)
container = db.create_container()
db.start_container(container)
host_path = container.get('Volumes')['/var/db']
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
with mock.patch('compose.service.Service.create_container', crash):
with self.assertRaises(Crash):
project.up()
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
class Crash(Exception):
pass
def crash(*args, **kwargs):
raise Crash()
|
<commit_before><commit_msg>Test that data volumes now survive a crash when recreating
Signed-off-by: Aanand Prasad <94fc4f3e3d0be608b3ed7b8529ff28d2a445cce1@gmail.com><commit_after>
|
from __future__ import unicode_literals
from __future__ import absolute_import
import mock
from compose.project import Project
from .testcases import DockerClientTestCase
class ResilienceTest(DockerClientTestCase):
def test_recreate_fails(self):
db = self.create_service('db', volumes=['/var/db'], command='top')
project = Project('composetest', [db], self.client)
container = db.create_container()
db.start_container(container)
host_path = container.get('Volumes')['/var/db']
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
with mock.patch('compose.service.Service.create_container', crash):
with self.assertRaises(Crash):
project.up()
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
class Crash(Exception):
pass
def crash(*args, **kwargs):
raise Crash()
|
Test that data volumes now survive a crash when recreating
Signed-off-by: Aanand Prasad <94fc4f3e3d0be608b3ed7b8529ff28d2a445cce1@gmail.com>from __future__ import unicode_literals
from __future__ import absolute_import
import mock
from compose.project import Project
from .testcases import DockerClientTestCase
class ResilienceTest(DockerClientTestCase):
def test_recreate_fails(self):
db = self.create_service('db', volumes=['/var/db'], command='top')
project = Project('composetest', [db], self.client)
container = db.create_container()
db.start_container(container)
host_path = container.get('Volumes')['/var/db']
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
with mock.patch('compose.service.Service.create_container', crash):
with self.assertRaises(Crash):
project.up()
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
class Crash(Exception):
pass
def crash(*args, **kwargs):
raise Crash()
|
<commit_before><commit_msg>Test that data volumes now survive a crash when recreating
Signed-off-by: Aanand Prasad <94fc4f3e3d0be608b3ed7b8529ff28d2a445cce1@gmail.com><commit_after>from __future__ import unicode_literals
from __future__ import absolute_import
import mock
from compose.project import Project
from .testcases import DockerClientTestCase
class ResilienceTest(DockerClientTestCase):
def test_recreate_fails(self):
db = self.create_service('db', volumes=['/var/db'], command='top')
project = Project('composetest', [db], self.client)
container = db.create_container()
db.start_container(container)
host_path = container.get('Volumes')['/var/db']
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
with mock.patch('compose.service.Service.create_container', crash):
with self.assertRaises(Crash):
project.up()
project.up()
container = db.containers()[0]
self.assertEqual(container.get('Volumes')['/var/db'], host_path)
class Crash(Exception):
pass
def crash(*args, **kwargs):
raise Crash()
|
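Aside: the crash-injection pattern in the test above (patch a method so it raises, assert the failure, then retry unpatched) can be shown in isolation. The class and names below are invented for illustration and are not part of compose:

import mock  # the stand-alone mock package, as imported in the test above

class Deployer(object):
    def create_container(self):
        return "container"

    def up(self):
        return self.create_container()

class Crash(Exception):
    pass

def crash(*args, **kwargs):
    raise Crash()

deployer = Deployer()
with mock.patch.object(Deployer, 'create_container', crash):
    try:
        deployer.up()          # fails mid-operation, like the patched project.up()
    except Crash:
        pass
print(deployer.up())           # unpatched again, the operation succeeds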
|
8e175782c3b79e64d543fb478b146d308d2a2ad8
|
bin/oneoffs/cas_statistic.py
|
bin/oneoffs/cas_statistic.py
|
import os
import pymongo
from collections import Counter
db_uri = os.getenv('SCITRAN_PERSISTENT_DB_URI', 'localhost:9001')
db = pymongo.MongoClient(db_uri).get_database('scitran')
COLLECTIONS = ['projects', 'acquisitions', 'analyses']
COLLECTIONS_WITH_EMBEDDED = [('sessions', 'subject')]
def files_of_collection(collection, embedded_doc=None):
hash_size_pairs = []
cursor = db.get_collection(collection).find({})
for document in cursor:
hash_size_pairs += files_of_document(document)
if embedded_doc:
hash_size_pairs += files_of_document(document.get(embedded_doc, {}))
return hash_size_pairs
def files_of_document(document):
hash_size_pairs = []
files = document.get('files', [])
for f in files:
hash_size_pairs.append((f['hash'], f['size']))
return hash_size_pairs
def main():
hash_size_pairs = []
for collection in COLLECTIONS:
hash_size_pairs += files_of_collection(collection)
for collection, embedded_doc in COLLECTIONS_WITH_EMBEDDED:
hash_size_pairs += files_of_collection(collection, embedded_doc)
counter = Counter(hash_size_pairs)
size_with_cas = 0
size_wo_cas = 0
file_count_cas = len(counter)
file_count_wo_cas = 0
for hash_size_pair in counter:
size_with_cas += hash_size_pair[1]
size_wo_cas += hash_size_pair[1] * counter[hash_size_pair]
file_count_wo_cas += counter[hash_size_pair]
saved_disk_space = size_wo_cas - size_with_cas
print('Total size (CAS): %s Bytes' % size_with_cas)
print('Total size (wo CAS): %s Bytes' % size_wo_cas)
print('Number of files (CAS): %s' % file_count_cas)
print('Number of files (wo CAS): %s' % file_count_wo_cas)
print('Saved disk space: %s Bytes (%s%%)' % (
saved_disk_space, round(saved_disk_space / float(size_wo_cas) * 100, 2)))
if __name__ == '__main__':
main()
|
Add small python script which calculates how much disk space we save by using CAS
|
Add small python script which calculates how much disk space we save by using CAS
|
Python
|
mit
|
scitran/api,scitran/core,scitran/core,scitran/core,scitran/core,scitran/api
|
Add small python script which calculates how much disk space we save by using CAS
|
import os
import pymongo
from collections import Counter
db_uri = os.getenv('SCITRAN_PERSISTENT_DB_URI', 'localhost:9001')
db = pymongo.MongoClient(db_uri).get_database('scitran')
COLLECTIONS = ['projects', 'acquisitions', 'analyses']
COLLECTIONS_WITH_EMBEDDED = [('sessions', 'subject')]
def files_of_collection(collection, embedded_doc=None):
hash_size_pairs = []
cursor = db.get_collection(collection).find({})
for document in cursor:
hash_size_pairs += files_of_document(document)
if embedded_doc:
hash_size_pairs += files_of_document(document.get(embedded_doc, {}))
return hash_size_pairs
def files_of_document(document):
hash_size_pairs = []
files = document.get('files', [])
for f in files:
hash_size_pairs.append((f['hash'], f['size']))
return hash_size_pairs
def main():
hash_size_pairs = []
for collection in COLLECTIONS:
hash_size_pairs += files_of_collection(collection)
for collection, embedded_doc in COLLECTIONS_WITH_EMBEDDED:
hash_size_pairs += files_of_collection(collection, embedded_doc)
counter = Counter(hash_size_pairs)
size_with_cas = 0
size_wo_cas = 0
file_count_cas = len(counter)
file_count_wo_cas = 0
for hash_size_pair in counter:
size_with_cas += hash_size_pair[1]
size_wo_cas += hash_size_pair[1] * counter[hash_size_pair]
file_count_wo_cas += counter[hash_size_pair]
saved_disk_space = size_wo_cas - size_with_cas
print('Total size (CAS): %s Bytes' % size_with_cas)
print('Total size (wo CAS): %s Bytes' % size_wo_cas)
print('Number of files (CAS): %s' % file_count_cas)
print('Number of files (wo CAS): %s' % file_count_wo_cas)
print('Saved disk space: %s Bytes (%s%%)' % (
saved_disk_space, round(saved_disk_space / float(size_wo_cas) * 100, 2)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add small python script which calculates how much disk space we save by using CAS<commit_after>
|
import os
import pymongo
from collections import Counter
db_uri = os.getenv('SCITRAN_PERSISTENT_DB_URI', 'localhost:9001')
db = pymongo.MongoClient(db_uri).get_database('scitran')
COLLECTIONS = ['projects', 'acquisitions', 'analyses']
COLLECTIONS_WITH_EMBEDDED = [('sessions', 'subject')]
def files_of_collection(collection, embedded_doc=None):
hash_size_pairs = []
cursor = db.get_collection(collection).find({})
for document in cursor:
hash_size_pairs += files_of_document(document)
if embedded_doc:
hash_size_pairs += files_of_document(document.get(embedded_doc, {}))
return hash_size_pairs
def files_of_document(document):
hash_size_pairs = []
files = document.get('files', [])
for f in files:
hash_size_pairs.append((f['hash'], f['size']))
return hash_size_pairs
def main():
hash_size_pairs = []
for collection in COLLECTIONS:
hash_size_pairs += files_of_collection(collection)
for collection, embedded_doc in COLLECTIONS_WITH_EMBEDDED:
hash_size_pairs += files_of_collection(collection, embedded_doc)
counter = Counter(hash_size_pairs)
size_with_cas = 0
size_wo_cas = 0
file_count_cas = len(counter)
file_count_wo_cas = 0
for hash_size_pair in counter:
size_with_cas += hash_size_pair[1]
size_wo_cas += hash_size_pair[1] * counter[hash_size_pair]
file_count_wo_cas += counter[hash_size_pair]
saved_disk_space = size_wo_cas - size_with_cas
print('Total size (CAS): %s Bytes' % size_with_cas)
print('Total size (wo CAS): %s Bytes' % size_wo_cas)
print('Number of files (CAS): %s' % file_count_cas)
print('Number of files (wo CAS): %s' % file_count_wo_cas)
print('Saved disk space: %s Bytes (%s%%)' % (
saved_disk_space, round(saved_disk_space / float(size_wo_cas) * 100, 2)))
if __name__ == '__main__':
main()
|
Add small python script which calculates how much disk space we save by using CASimport os
import pymongo
from collections import Counter
db_uri = os.getenv('SCITRAN_PERSISTENT_DB_URI', 'localhost:9001')
db = pymongo.MongoClient(db_uri).get_database('scitran')
COLLECTIONS = ['projects', 'acquisitions', 'analyses']
COLLECTIONS_WITH_EMBEDDED = [('sessions', 'subject')]
def files_of_collection(collection, embedded_doc=None):
hash_size_pairs = []
cursor = db.get_collection(collection).find({})
for document in cursor:
hash_size_pairs += files_of_document(document)
if embedded_doc:
hash_size_pairs += files_of_document(document.get(embedded_doc, {}))
return hash_size_pairs
def files_of_document(document):
hash_size_pairs = []
files = document.get('files', [])
for f in files:
hash_size_pairs.append((f['hash'], f['size']))
return hash_size_pairs
def main():
hash_size_pairs = []
for collection in COLLECTIONS:
hash_size_pairs += files_of_collection(collection)
for collection, embedded_doc in COLLECTIONS_WITH_EMBEDDED:
hash_size_pairs += files_of_collection(collection, embedded_doc)
counter = Counter(hash_size_pairs)
size_with_cas = 0
size_wo_cas = 0
file_count_cas = len(counter)
file_count_wo_cas = 0
for hash_size_pair in counter:
size_with_cas += hash_size_pair[1]
size_wo_cas += hash_size_pair[1] * counter[hash_size_pair]
file_count_wo_cas += counter[hash_size_pair]
saved_disk_space = size_wo_cas - size_with_cas
print('Total size (CAS): %s Bytes' % size_with_cas)
print('Total size (wo CAS): %s Bytes' % size_wo_cas)
print('Number of files (CAS): %s' % file_count_cas)
print('Number of files (wo CAS): %s' % file_count_wo_cas)
print('Saved disk space: %s Bytes (%s%%)' % (
saved_disk_space, round(saved_disk_space / float(size_wo_cas) * 100, 2)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add small python script which calculates how much disk space we save by using CAS<commit_after>import os
import pymongo
from collections import Counter
db_uri = os.getenv('SCITRAN_PERSISTENT_DB_URI', 'localhost:9001')
db = pymongo.MongoClient(db_uri).get_database('scitran')
COLLECTIONS = ['projects', 'acquisitions', 'analyses']
COLLECTIONS_WITH_EMBEDDED = [('sessions', 'subject')]
def files_of_collection(collection, embedded_doc=None):
hash_size_pairs = []
cursor = db.get_collection(collection).find({})
for document in cursor:
hash_size_pairs += files_of_document(document)
if embedded_doc:
hash_size_pairs += files_of_document(document.get(embedded_doc, {}))
return hash_size_pairs
def files_of_document(document):
hash_size_pairs = []
files = document.get('files', [])
for f in files:
hash_size_pairs.append((f['hash'], f['size']))
return hash_size_pairs
def main():
hash_size_pairs = []
for collection in COLLECTIONS:
hash_size_pairs += files_of_collection(collection)
for collection, embedded_doc in COLLECTIONS_WITH_EMBEDDED:
hash_size_pairs += files_of_collection(collection, embedded_doc)
counter = Counter(hash_size_pairs)
size_with_cas = 0
size_wo_cas = 0
file_count_cas = len(counter)
file_count_wo_cas = 0
for hash_size_pair in counter:
size_with_cas += hash_size_pair[1]
size_wo_cas += hash_size_pair[1] * counter[hash_size_pair]
file_count_wo_cas += counter[hash_size_pair]
saved_disk_space = size_wo_cas - size_with_cas
print('Total size (CAS): %s Bytes' % size_with_cas)
print('Total size (wo CAS): %s Bytes' % size_wo_cas)
print('Number of files (CAS): %s' % file_count_cas)
print('Number of files (wo CAS): %s' % file_count_wo_cas)
print('Saved disk space: %s Bytes (%s%%)' % (
saved_disk_space, round(saved_disk_space / float(size_wo_cas) * 100, 2)))
if __name__ == '__main__':
main()
|
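Aside: the savings arithmetic in the script reduces to counting duplicate (hash, size) pairs. A small worked example with made-up file entries, not taken from any real database:

from collections import Counter

# Two documents reference the same file (same hash and size), so CAS stores it once.
hash_size_pairs = [("abc", 100), ("abc", 100), ("def", 250)]

counter = Counter(hash_size_pairs)
size_with_cas = sum(size for (_, size) in counter)                   # 350 bytes stored
size_wo_cas = sum(size * n for (_, size), n in counter.items())      # 450 bytes referenced
print("Saved disk space: %s Bytes" % (size_wo_cas - size_with_cas))  # 100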
|
61677566ce685379456e7853c69a78ea32353422
|
static_precompiler/tests/conftest.py
|
static_precompiler/tests/conftest.py
|
from static_precompiler.settings import ROOT, OUTPUT_DIR
import shutil
import os
import pytest
@pytest.fixture(autouse=True)
def _no_output_dir(request):
""" Make sure that output dir does not exists. """
path = os.path.join(ROOT, OUTPUT_DIR)
if os.path.exists(path):
shutil.rmtree(path)
def fin():
if os.path.exists(path):
shutil.rmtree(path)
request.addfinalizer(fin)
|
Add auto fixture to make sure that output dir does not exist when tests are run
|
Add auto fixture to make sure that output dir does not exist when tests are run
|
Python
|
mit
|
liumengjun/django-static-precompiler,liumengjun/django-static-precompiler,paera/django-static-precompiler,jaheba/django-static-precompiler,paera/django-static-precompiler,jaheba/django-static-precompiler,paera/django-static-precompiler,jaheba/django-static-precompiler,paera/django-static-precompiler,jaheba/django-static-precompiler,liumengjun/django-static-precompiler,liumengjun/django-static-precompiler,liumengjun/django-static-precompiler
|
Add auto fixture to make sure that output dir does not exist when tests are run
|
from static_precompiler.settings import ROOT, OUTPUT_DIR
import shutil
import os
import pytest
@pytest.fixture(autouse=True)
def _no_output_dir(request):
""" Make sure that output dir does not exists. """
path = os.path.join(ROOT, OUTPUT_DIR)
if os.path.exists(path):
shutil.rmtree(path)
def fin():
if os.path.exists(path):
shutil.rmtree(path)
request.addfinalizer(fin)
|
<commit_before><commit_msg>Add auto fixture to make sure that output dir does not exist when tests are run<commit_after>
|
from static_precompiler.settings import ROOT, OUTPUT_DIR
import shutil
import os
import pytest
@pytest.fixture(autouse=True)
def _no_output_dir(request):
""" Make sure that output dir does not exists. """
path = os.path.join(ROOT, OUTPUT_DIR)
if os.path.exists(path):
shutil.rmtree(path)
def fin():
if os.path.exists(path):
shutil.rmtree(path)
request.addfinalizer(fin)
|
Add auto fixture to make sure that output dir does not exist when tests are runfrom static_precompiler.settings import ROOT, OUTPUT_DIR
import shutil
import os
import pytest
@pytest.fixture(autouse=True)
def _no_output_dir(request):
""" Make sure that output dir does not exists. """
path = os.path.join(ROOT, OUTPUT_DIR)
if os.path.exists(path):
shutil.rmtree(path)
def fin():
if os.path.exists(path):
shutil.rmtree(path)
request.addfinalizer(fin)
|
<commit_before><commit_msg>Add auto fixture to make sure that output dir does not exist when tests are run<commit_after>from static_precompiler.settings import ROOT, OUTPUT_DIR
import shutil
import os
import pytest
@pytest.fixture(autouse=True)
def _no_output_dir(request):
""" Make sure that output dir does not exists. """
path = os.path.join(ROOT, OUTPUT_DIR)
if os.path.exists(path):
shutil.rmtree(path)
def fin():
if os.path.exists(path):
shutil.rmtree(path)
request.addfinalizer(fin)
|
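Aside: because the fixture is declared with autouse=True, pytest wraps every test in the package without the test naming it. A hedged sketch of the same pattern against a throwaway path of our own (TMP_DIR below is invented, not the project's OUTPUT_DIR):

import os
import shutil
import pytest

TMP_DIR = "/tmp/example-output-dir"  # hypothetical path for illustration only

@pytest.fixture(autouse=True)
def _clean_tmp_dir(request):
    if os.path.exists(TMP_DIR):
        shutil.rmtree(TMP_DIR)
    def fin():
        if os.path.exists(TMP_DIR):
            shutil.rmtree(TMP_DIR)
    request.addfinalizer(fin)

def test_starts_clean():  # never mentions the fixture, yet runs inside it
    assert not os.path.exists(TMP_DIR)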
|
422b9458d26866b9f6692ddb0ccf2305c3ac6ea7
|
dev/surrogates/plots.py
|
dev/surrogates/plots.py
|
import darch.search_logging as sl
import darch.visualization as vi
import numpy as np
import seaborn as sns; sns.set()
# checking these across time.
log_lst = sl.read_search_folder('./logs/cifar10_medium/run-0')
xkey = 'epoch_number'
ykey = 'validation_accuracy'
num_lines = 8
time_plotter = vi.LinePlot(xlabel='time_in_minutes', ylabel=ykey)
epoch_plotter = vi.LinePlot(xlabel=xkey, ylabel=ykey)
for lg in sorted(log_lst, key=lambda x: x['results']['sequences'][ykey][-1], reverse=True)[:num_lines]:
r = lg['results']['sequences']
time_plotter.add_line(np.linspace(0.0, 120.0, len(r[xkey]) + 1)[1:], r[ykey])
epoch_plotter.add_line(r[xkey], r[ykey])
time_plotter.plot()
epoch_plotter.plot()
|
Add an extra file to the surrogates experiments.
|
Add an extra file to the surrogates experiments.
|
Python
|
mit
|
negrinho/deep_architect,negrinho/deep_architect
|
Add an extra file to the surrogates experiments.
|
import darch.search_logging as sl
import darch.visualization as vi
import numpy as np
import seaborn as sns; sns.set()
# checking these across time.
log_lst = sl.read_search_folder('./logs/cifar10_medium/run-0')
xkey = 'epoch_number'
ykey = 'validation_accuracy'
num_lines = 8
time_plotter = vi.LinePlot(xlabel='time_in_minutes', ylabel=ykey)
epoch_plotter = vi.LinePlot(xlabel=xkey, ylabel=ykey)
for lg in sorted(log_lst, key=lambda x: x['results']['sequences'][ykey][-1], reverse=True)[:num_lines]:
r = lg['results']['sequences']
time_plotter.add_line(np.linspace(0.0, 120.0, len(r[xkey]) + 1)[1:], r[ykey])
epoch_plotter.add_line(r[xkey], r[ykey])
time_plotter.plot()
epoch_plotter.plot()
|
<commit_before><commit_msg>Add an extra file to the surrogates experiments.<commit_after>
|
import darch.search_logging as sl
import darch.visualization as vi
import numpy as np
import seaborn as sns; sns.set()
# checking these across time.
log_lst = sl.read_search_folder('./logs/cifar10_medium/run-0')
xkey = 'epoch_number'
ykey = 'validation_accuracy'
num_lines = 8
time_plotter = vi.LinePlot(xlabel='time_in_minutes', ylabel=ykey)
epoch_plotter = vi.LinePlot(xlabel=xkey, ylabel=ykey)
for lg in sorted(log_lst, key=lambda x: x['results']['sequences'][ykey][-1], reverse=True)[:num_lines]:
r = lg['results']['sequences']
time_plotter.add_line(np.linspace(0.0, 120.0, len(r[xkey]) + 1)[1:], r[ykey])
epoch_plotter.add_line(r[xkey], r[ykey])
time_plotter.plot()
epoch_plotter.plot()
|
Add an extra file to the surrogates experiments.import darch.search_logging as sl
import darch.visualization as vi
import numpy as np
import seaborn as sns; sns.set()
# checking these across time.
log_lst = sl.read_search_folder('./logs/cifar10_medium/run-0')
xkey = 'epoch_number'
ykey = 'validation_accuracy'
num_lines = 8
time_plotter = vi.LinePlot(xlabel='time_in_minutes', ylabel=ykey)
epoch_plotter = vi.LinePlot(xlabel=xkey, ylabel=ykey)
for lg in sorted(log_lst, key=lambda x: x['results']['sequences'][ykey][-1], reverse=True)[:num_lines]:
r = lg['results']['sequences']
time_plotter.add_line(np.linspace(0.0, 120.0, len(r[xkey]) + 1)[1:], r[ykey])
epoch_plotter.add_line(r[xkey], r[ykey])
time_plotter.plot()
epoch_plotter.plot()
|
<commit_before><commit_msg>Add an extra file to the surrogates experiments.<commit_after>import darch.search_logging as sl
import darch.visualization as vi
import numpy as np
import seaborn as sns; sns.set()
# checking these across time.
log_lst = sl.read_search_folder('./logs/cifar10_medium/run-0')
xkey = 'epoch_number'
ykey = 'validation_accuracy'
num_lines = 8
time_plotter = vi.LinePlot(xlabel='time_in_minutes', ylabel=ykey)
epoch_plotter = vi.LinePlot(xlabel=xkey, ylabel=ykey)
for lg in sorted(log_lst, key=lambda x: x['results']['sequences'][ykey][-1], reverse=True)[:num_lines]:
r = lg['results']['sequences']
time_plotter.add_line(np.linspace(0.0, 120.0, len(r[xkey]) + 1)[1:], r[ykey])
epoch_plotter.add_line(r[xkey], r[ykey])
time_plotter.plot()
epoch_plotter.plot()
|
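Aside: the script assumes every search-log entry carries a results.sequences mapping with per-epoch lists. A sketch of that assumed shape with invented numbers, using the same sort key as above:

ykey = 'validation_accuracy'

# Hypothetical entries shaped like those returned by read_search_folder().
log_lst = [
    {'results': {'sequences': {'epoch_number': [1, 2, 3],
                               'validation_accuracy': [0.51, 0.62, 0.70]}}},
    {'results': {'sequences': {'epoch_number': [1, 2, 3],
                               'validation_accuracy': [0.55, 0.66, 0.74]}}},
]

# Keep the runs with the best final validation accuracy, best first.
best = sorted(log_lst, key=lambda x: x['results']['sequences'][ykey][-1], reverse=True)
print([lg['results']['sequences'][ykey][-1] for lg in best])  # [0.74, 0.7]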
|
660e0955979b7d11b7442a00747673700413bf1d
|
scipy/ndimage/tests/test_splines.py
|
scipy/ndimage/tests/test_splines.py
|
"""Tests for spline filtering."""
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from scipy import ndimage
def get_spline_knot_values(order):
"""Knot values to the right of a B-spline's center."""
knot_values = {0: [1],
1: [1],
2: [6, 1],
3: [4, 1],
4: [230, 76, 1],
5: [66, 26, 1]}
return knot_values[order]
def make_spline_knot_matrix(n, order, mode='mirror'):
"""Matrix to invert to find the spline coefficients."""
knot_values = get_spline_knot_values(order)
matrix = np.zeros((n, n))
for diag, knot_value in enumerate(knot_values):
indices = np.arange(diag, n)
if diag == 0:
matrix[indices, indices] = knot_value
else:
matrix[indices, indices - diag] = knot_value
matrix[indices - diag, indices] = knot_value
knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
if mode == 'mirror':
start, step = 1, 1
elif mode == 'reflect':
start, step = 0, 1
elif mode == 'wrap':
start, step = -1, -1
else:
raise ValueError('unsupported mode {}'.format(mode))
for row in range(len(knot_values) - 1):
for idx, knot_value in enumerate(knot_values[row + 1:]):
matrix[row, start + step*idx] += knot_value
matrix[-row - 1, -start - 1 - step*idx] += knot_value
return matrix / knot_values_sum
@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
def test_spline_filter_vs_matrix_solution(order):
n = 100
eye = np.eye(n, dtype=float)
spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order)
spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order)
matrix = make_spline_knot_matrix(n, order)
assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
|
Add a test of spline filtering vs. matrix solving.
|
TST: Add a test of spline filtering vs. matrix solving.
|
Python
|
bsd-3-clause
|
mdhaber/scipy,rgommers/scipy,andyfaff/scipy,lhilt/scipy,mdhaber/scipy,jamestwebber/scipy,grlee77/scipy,jamestwebber/scipy,aeklant/scipy,perimosocordiae/scipy,perimosocordiae/scipy,person142/scipy,zerothi/scipy,WarrenWeckesser/scipy,anntzer/scipy,perimosocordiae/scipy,Stefan-Endres/scipy,andyfaff/scipy,vigna/scipy,jor-/scipy,tylerjereddy/scipy,lhilt/scipy,person142/scipy,Eric89GXL/scipy,andyfaff/scipy,lhilt/scipy,Stefan-Endres/scipy,endolith/scipy,ilayn/scipy,arokem/scipy,e-q/scipy,WarrenWeckesser/scipy,Stefan-Endres/scipy,anntzer/scipy,scipy/scipy,Eric89GXL/scipy,e-q/scipy,arokem/scipy,mdhaber/scipy,nmayorov/scipy,person142/scipy,zerothi/scipy,tylerjereddy/scipy,aarchiba/scipy,matthew-brett/scipy,matthew-brett/scipy,ilayn/scipy,aeklant/scipy,perimosocordiae/scipy,anntzer/scipy,WarrenWeckesser/scipy,jor-/scipy,zerothi/scipy,andyfaff/scipy,jamestwebber/scipy,WarrenWeckesser/scipy,aarchiba/scipy,vigna/scipy,pizzathief/scipy,rgommers/scipy,anntzer/scipy,WarrenWeckesser/scipy,aarchiba/scipy,gfyoung/scipy,rgommers/scipy,ilayn/scipy,pizzathief/scipy,scipy/scipy,pizzathief/scipy,zerothi/scipy,anntzer/scipy,andyfaff/scipy,aarchiba/scipy,lhilt/scipy,e-q/scipy,Stefan-Endres/scipy,nmayorov/scipy,tylerjereddy/scipy,aarchiba/scipy,endolith/scipy,jor-/scipy,nmayorov/scipy,scipy/scipy,vigna/scipy,gfyoung/scipy,tylerjereddy/scipy,arokem/scipy,mdhaber/scipy,matthew-brett/scipy,gertingold/scipy,person142/scipy,zerothi/scipy,matthew-brett/scipy,arokem/scipy,jamestwebber/scipy,scipy/scipy,vigna/scipy,scipy/scipy,rgommers/scipy,jamestwebber/scipy,aeklant/scipy,e-q/scipy,e-q/scipy,mdhaber/scipy,jor-/scipy,gertingold/scipy,aeklant/scipy,gertingold/scipy,zerothi/scipy,gfyoung/scipy,Stefan-Endres/scipy,rgommers/scipy,anntzer/scipy,aeklant/scipy,WarrenWeckesser/scipy,gfyoung/scipy,vigna/scipy,nmayorov/scipy,pizzathief/scipy,gfyoung/scipy,perimosocordiae/scipy,Eric89GXL/scipy,grlee77/scipy,mdhaber/scipy,matthew-brett/scipy,lhilt/scipy,grlee77/scipy,perimosocordiae/scipy,endolith/scipy,gertingold/scipy,ilayn/scipy,endolith/scipy,Stefan-Endres/scipy,gertingold/scipy,jor-/scipy,person142/scipy,andyfaff/scipy,endolith/scipy,pizzathief/scipy,grlee77/scipy,ilayn/scipy,Eric89GXL/scipy,arokem/scipy,nmayorov/scipy,grlee77/scipy,tylerjereddy/scipy,scipy/scipy,Eric89GXL/scipy,ilayn/scipy,endolith/scipy,Eric89GXL/scipy
|
TST: Add a test of spline filtering vs. matrix solving.
|
"""Tests for spline filtering."""
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from scipy import ndimage
def get_spline_knot_values(order):
"""Knot values to the right of a B-spline's center."""
knot_values = {0: [1],
1: [1],
2: [6, 1],
3: [4, 1],
4: [230, 76, 1],
5: [66, 26, 1]}
return knot_values[order]
def make_spline_knot_matrix(n, order, mode='mirror'):
"""Matrix to invert to find the spline coefficients."""
knot_values = get_spline_knot_values(order)
matrix = np.zeros((n, n))
for diag, knot_value in enumerate(knot_values):
indices = np.arange(diag, n)
if diag == 0:
matrix[indices, indices] = knot_value
else:
matrix[indices, indices - diag] = knot_value
matrix[indices - diag, indices] = knot_value
knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
if mode == 'mirror':
start, step = 1, 1
elif mode == 'reflect':
start, step = 0, 1
elif mode == 'wrap':
start, step = -1, -1
else:
raise ValueError('unsupported mode {}'.format(mode))
for row in range(len(knot_values) - 1):
for idx, knot_value in enumerate(knot_values[row + 1:]):
matrix[row, start + step*idx] += knot_value
matrix[-row - 1, -start - 1 - step*idx] += knot_value
return matrix / knot_values_sum
@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
def test_spline_filter_vs_matrix_solution(order):
n = 100
eye = np.eye(n, dtype=float)
spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order)
spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order)
matrix = make_spline_knot_matrix(n, order)
assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
|
<commit_before><commit_msg>TST: Add a test of spline filtering vs. matrix solving.<commit_after>
|
"""Tests for spline filtering."""
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from scipy import ndimage
def get_spline_knot_values(order):
"""Knot values to the right of a B-spline's center."""
knot_values = {0: [1],
1: [1],
2: [6, 1],
3: [4, 1],
4: [230, 76, 1],
5: [66, 26, 1]}
return knot_values[order]
def make_spline_knot_matrix(n, order, mode='mirror'):
"""Matrix to invert to find the spline coefficients."""
knot_values = get_spline_knot_values(order)
matrix = np.zeros((n, n))
for diag, knot_value in enumerate(knot_values):
indices = np.arange(diag, n)
if diag == 0:
matrix[indices, indices] = knot_value
else:
matrix[indices, indices - diag] = knot_value
matrix[indices - diag, indices] = knot_value
knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
if mode == 'mirror':
start, step = 1, 1
elif mode == 'reflect':
start, step = 0, 1
elif mode == 'wrap':
start, step = -1, -1
else:
raise ValueError('unsupported mode {}'.format(mode))
for row in range(len(knot_values) - 1):
for idx, knot_value in enumerate(knot_values[row + 1:]):
matrix[row, start + step*idx] += knot_value
matrix[-row - 1, -start - 1 - step*idx] += knot_value
return matrix / knot_values_sum
@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
def test_spline_filter_vs_matrix_solution(order):
n = 100
eye = np.eye(n, dtype=float)
spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order)
spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order)
matrix = make_spline_knot_matrix(n, order)
assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
|
TST: Add a test of spline filtering vs. matrix solving."""Tests for spline filtering."""
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from scipy import ndimage
def get_spline_knot_values(order):
"""Knot values to the right of a B-spline's center."""
knot_values = {0: [1],
1: [1],
2: [6, 1],
3: [4, 1],
4: [230, 76, 1],
5: [66, 26, 1]}
return knot_values[order]
def make_spline_knot_matrix(n, order, mode='mirror'):
"""Matrix to invert to find the spline coefficients."""
knot_values = get_spline_knot_values(order)
matrix = np.zeros((n, n))
for diag, knot_value in enumerate(knot_values):
indices = np.arange(diag, n)
if diag == 0:
matrix[indices, indices] = knot_value
else:
matrix[indices, indices - diag] = knot_value
matrix[indices - diag, indices] = knot_value
knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
if mode == 'mirror':
start, step = 1, 1
elif mode == 'reflect':
start, step = 0, 1
elif mode == 'wrap':
start, step = -1, -1
else:
raise ValueError('unsupported mode {}'.format(mode))
for row in range(len(knot_values) - 1):
for idx, knot_value in enumerate(knot_values[row + 1:]):
matrix[row, start + step*idx] += knot_value
matrix[-row - 1, -start - 1 - step*idx] += knot_value
return matrix / knot_values_sum
@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
def test_spline_filter_vs_matrix_solution(order):
n = 100
eye = np.eye(n, dtype=float)
spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order)
spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order)
matrix = make_spline_knot_matrix(n, order)
assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
|
<commit_before><commit_msg>TST: Add a test of spline filtering vs. matrix solving.<commit_after>"""Tests for spline filtering."""
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from scipy import ndimage
def get_spline_knot_values(order):
"""Knot values to the right of a B-spline's center."""
knot_values = {0: [1],
1: [1],
2: [6, 1],
3: [4, 1],
4: [230, 76, 1],
5: [66, 26, 1]}
return knot_values[order]
def make_spline_knot_matrix(n, order, mode='mirror'):
"""Matrix to invert to find the spline coefficients."""
knot_values = get_spline_knot_values(order)
matrix = np.zeros((n, n))
for diag, knot_value in enumerate(knot_values):
indices = np.arange(diag, n)
if diag == 0:
matrix[indices, indices] = knot_value
else:
matrix[indices, indices - diag] = knot_value
matrix[indices - diag, indices] = knot_value
knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
if mode == 'mirror':
start, step = 1, 1
elif mode == 'reflect':
start, step = 0, 1
elif mode == 'wrap':
start, step = -1, -1
else:
raise ValueError('unsupported mode {}'.format(mode))
for row in range(len(knot_values) - 1):
for idx, knot_value in enumerate(knot_values[row + 1:]):
matrix[row, start + step*idx] += knot_value
matrix[-row - 1, -start - 1 - step*idx] += knot_value
return matrix / knot_values_sum
@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
def test_spline_filter_vs_matrix_solution(order):
n = 100
eye = np.eye(n, dtype=float)
spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order)
spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order)
matrix = make_spline_knot_matrix(n, order)
assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))
|
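Aside: for a concrete picture of what make_spline_knot_matrix builds, the order-3 'mirror' case can be written out for a small n. This only restates the construction above with numbers; it does not call scipy:

import numpy as np

# Order-3 knot values [4, 1]: a tridiagonal matrix with 4 on the diagonal and
# 1 on the off-diagonals, the mirror boundary folding the outermost 1 back in,
# normalised by 4 + 2 * 1 = 6.
n = 5
matrix = np.zeros((n, n))
matrix[np.arange(n), np.arange(n)] = 4
matrix[np.arange(1, n), np.arange(n - 1)] = 1   # sub-diagonal
matrix[np.arange(n - 1), np.arange(1, n)] = 1   # super-diagonal
matrix[0, 1] += 1    # mirror boundary: start=1, step=1
matrix[-1, -2] += 1
print(matrix / 6.0)  # same result as make_spline_knot_matrix(5, 3, mode='mirror')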
|
48fc7cad7eb4cec0b928aba3daca7e934d46d87c
|
functest/tests/unit/features/test_sdnvpn.py
|
functest/tests/unit/features/test_sdnvpn.py
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import sdnvpn
from functest.utils import constants
class SdnVpnTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sdnvpn = sdnvpn.SdnVpnTests()
def test_init(self):
self.assertEqual(self.sdnvpn.project_name, "sdnvpn")
self.assertEqual(self.sdnvpn.case_name, "bgpvpn")
self.assertEqual(
self.sdnvpn.repo,
constants.CONST.__getattribute__("dir_repo_sdnvpn"))
self.assertEqual(
self.sdnvpn.cmd,
'cd {}/sdnvpn/test/functest && python ./run_tests.py'.format(
self.sdnvpn.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for sdnvpn
|
Add unit tests for sdnvpn
Change-Id: Ie4ebc4e2bc6f2e66f5f567f45f44c073cd9d313d
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
Python
|
apache-2.0
|
opnfv/functest,mywulin/functest,mywulin/functest,opnfv/functest
|
Add unit tests for sdnvpn
Change-Id: Ie4ebc4e2bc6f2e66f5f567f45f44c073cd9d313d
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import sdnvpn
from functest.utils import constants
class SdnVpnTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sdnvpn = sdnvpn.SdnVpnTests()
def test_init(self):
self.assertEqual(self.sdnvpn.project_name, "sdnvpn")
self.assertEqual(self.sdnvpn.case_name, "bgpvpn")
self.assertEqual(
self.sdnvpn.repo,
constants.CONST.__getattribute__("dir_repo_sdnvpn"))
self.assertEqual(
self.sdnvpn.cmd,
'cd {}/sdnvpn/test/functest && python ./run_tests.py'.format(
self.sdnvpn.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for sdnvpn
Change-Id: Ie4ebc4e2bc6f2e66f5f567f45f44c073cd9d313d
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import sdnvpn
from functest.utils import constants
class SdnVpnTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sdnvpn = sdnvpn.SdnVpnTests()
def test_init(self):
self.assertEqual(self.sdnvpn.project_name, "sdnvpn")
self.assertEqual(self.sdnvpn.case_name, "bgpvpn")
self.assertEqual(
self.sdnvpn.repo,
constants.CONST.__getattribute__("dir_repo_sdnvpn"))
self.assertEqual(
self.sdnvpn.cmd,
'cd {}/sdnvpn/test/functest && python ./run_tests.py'.format(
self.sdnvpn.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for sdnvpn
Change-Id: Ie4ebc4e2bc6f2e66f5f567f45f44c073cd9d313d
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import sdnvpn
from functest.utils import constants
class SdnVpnTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sdnvpn = sdnvpn.SdnVpnTests()
def test_init(self):
self.assertEqual(self.sdnvpn.project_name, "sdnvpn")
self.assertEqual(self.sdnvpn.case_name, "bgpvpn")
self.assertEqual(
self.sdnvpn.repo,
constants.CONST.__getattribute__("dir_repo_sdnvpn"))
self.assertEqual(
self.sdnvpn.cmd,
'cd {}/sdnvpn/test/functest && python ./run_tests.py'.format(
self.sdnvpn.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for sdnvpn
Change-Id: Ie4ebc4e2bc6f2e66f5f567f45f44c073cd9d313d
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import sdnvpn
from functest.utils import constants
class SdnVpnTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sdnvpn = sdnvpn.SdnVpnTests()
def test_init(self):
self.assertEqual(self.sdnvpn.project_name, "sdnvpn")
self.assertEqual(self.sdnvpn.case_name, "bgpvpn")
self.assertEqual(
self.sdnvpn.repo,
constants.CONST.__getattribute__("dir_repo_sdnvpn"))
self.assertEqual(
self.sdnvpn.cmd,
'cd {}/sdnvpn/test/functest && python ./run_tests.py'.format(
self.sdnvpn.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
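Aside: the CONST.__getattribute__("dir_repo_sdnvpn") calls in the assertions are just dynamic attribute lookups; getattr spells the same thing. A tiny illustration on a stand-in object (the class and path below are invented, not functest's constants):

class _Const(object):
    dir_repo_sdnvpn = '/home/opnfv/repos/sdnvpn'  # invented value for the example

CONST = _Const()
assert CONST.__getattribute__('dir_repo_sdnvpn') == getattr(CONST, 'dir_repo_sdnvpn')
print(getattr(CONST, 'dir_repo_sdnvpn'))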
|
a91b633ba88a01b12305fdfafd570c0b3776b42d
|
utils/print_num_errors.py
|
utils/print_num_errors.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on score files (stored in JSON files).
"""
import argparse
import json
import numpy as np
def parse_json_file(json_file_path):
with open(json_file_path, "r") as fd:
json_data = json.load(fd)
return json_data
def extract_data_list(json_dict):
io_list = json_dict["io"]
success_list = [image_dict for image_dict in io_list if "error" not in image_dict]
aborted_list = [image_dict for image_dict in io_list if "error" in image_dict]
return success_list, aborted_list
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on score files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# FETCH SCORE #############################################################
json_data = parse_json_file(json_file_path)
success_list, aborted_list = extract_data_list(json_data)
print("{} images".format(len(success_list) + len(aborted_list)))
print("{} succeeded".format(len(success_list)))
print("{} failed".format(len(aborted_list)))
if len(aborted_list) > 0:
error_message_dict = {}
for image_dict in aborted_list:
error_message = image_dict["error"]["message"]
if error_message in error_message_dict:
error_message_dict[error_message] += 1
else:
error_message_dict[error_message] = 1
for error_message, count in error_message_dict.items():
print("-> {}: {}".format(error_message, count))
|
Add a tool script to print error statistics in output JSON files.
|
Add a tool script to print error statistics in output JSON files.
|
Python
|
mit
|
jdhp-sap/sap-cta-data-pipeline,jdhp-sap/data-pipeline-standalone-scripts,jdhp-sap/sap-cta-data-pipeline,jdhp-sap/data-pipeline-standalone-scripts
|
Add a tool script to print error statistics in output JSON files.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on score files (stored in JSON files).
"""
import argparse
import json
import numpy as np
def parse_json_file(json_file_path):
with open(json_file_path, "r") as fd:
json_data = json.load(fd)
return json_data
def extract_data_list(json_dict):
io_list = json_dict["io"]
success_list = [image_dict for image_dict in io_list if "error" not in image_dict]
aborted_list = [image_dict for image_dict in io_list if "error" in image_dict]
return success_list, aborted_list
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on score files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# FETCH SCORE #############################################################
json_data = parse_json_file(json_file_path)
success_list, aborted_list = extract_data_list(json_data)
print("{} images".format(len(success_list) + len(aborted_list)))
print("{} succeeded".format(len(success_list)))
print("{} failed".format(len(aborted_list)))
if len(aborted_list) > 0:
error_message_dict = {}
for image_dict in aborted_list:
error_message = image_dict["error"]["message"]
if error_message in error_message_dict:
error_message_dict[error_message] += 1
else:
error_message_dict[error_message] = 1
for error_message, count in error_message_dict.items():
print("-> {}: {}".format(error_message, count))
|
<commit_before><commit_msg>Add a tool script to print error statistics in output JSON files.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on score files (stored in JSON files).
"""
import argparse
import json
import numpy as np
def parse_json_file(json_file_path):
with open(json_file_path, "r") as fd:
json_data = json.load(fd)
return json_data
def extract_data_list(json_dict):
io_list = json_dict["io"]
success_list = [image_dict for image_dict in io_list if "error" not in image_dict]
aborted_list = [image_dict for image_dict in io_list if "error" in image_dict]
return success_list, aborted_list
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on score files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# FETCH SCORE #############################################################
json_data = parse_json_file(json_file_path)
success_list, aborted_list = extract_data_list(json_data)
print("{} images".format(len(success_list) + len(aborted_list)))
print("{} succeeded".format(len(success_list)))
print("{} failed".format(len(aborted_list)))
if len(aborted_list) > 0:
error_message_dict = {}
for image_dict in aborted_list:
error_message = image_dict["error"]["message"]
if error_message in error_message_dict:
error_message_dict[error_message] += 1
else:
error_message_dict[error_message] = 1
for error_message, count in error_message_dict.items():
print("-> {}: {}".format(error_message, count))
|
Add a tool script to print error statistics in output JSON files.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on score files (stored in JSON files).
"""
import argparse
import json
import numpy as np
def parse_json_file(json_file_path):
with open(json_file_path, "r") as fd:
json_data = json.load(fd)
return json_data
def extract_data_list(json_dict):
io_list = json_dict["io"]
success_list = [image_dict for image_dict in io_list if "error" not in image_dict]
aborted_list = [image_dict for image_dict in io_list if "error" in image_dict]
return success_list, aborted_list
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on score files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# FETCH SCORE #############################################################
json_data = parse_json_file(json_file_path)
success_list, aborted_list = extract_data_list(json_data)
print("{} images".format(len(success_list) + len(aborted_list)))
print("{} succeeded".format(len(success_list)))
print("{} failed".format(len(aborted_list)))
if len(aborted_list) > 0:
error_message_dict = {}
for image_dict in aborted_list:
error_message = image_dict["error"]["message"]
if error_message in error_message_dict:
error_message_dict[error_message] += 1
else:
error_message_dict[error_message] = 1
for error_message, count in error_message_dict.items():
print("-> {}: {}".format(error_message, count))
|
<commit_before><commit_msg>Add a tool script to print error statistics in output JSON files.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on score files (stored in JSON files).
"""
import argparse
import json
import numpy as np
def parse_json_file(json_file_path):
with open(json_file_path, "r") as fd:
json_data = json.load(fd)
return json_data
def extract_data_list(json_dict):
io_list = json_dict["io"]
success_list = [image_dict for image_dict in io_list if "error" not in image_dict]
aborted_list = [image_dict for image_dict in io_list if "error" in image_dict]
return success_list, aborted_list
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on score files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# FETCH SCORE #############################################################
json_data = parse_json_file(json_file_path)
success_list, aborted_list = extract_data_list(json_data)
print("{} images".format(len(success_list) + len(aborted_list)))
print("{} succeeded".format(len(success_list)))
print("{} failed".format(len(aborted_list)))
if len(aborted_list) > 0:
error_message_dict = {}
for image_dict in aborted_list:
error_message = image_dict["error"]["message"]
if error_message in error_message_dict:
error_message_dict[error_message] += 1
else:
error_message_dict[error_message] = 1
for error_message, count in error_message_dict.items():
print("-> {}: {}".format(error_message, count))
|
|
0b47397b91fec94910f18ea1711184ecfd0f6bf0
|
jacquard/storage/tests/test_file.py
|
jacquard/storage/tests/test_file.py
|
from jacquard.storage.file import FileStore
def test_get_nonexistent_key():
# Just test this works without errors
store = FileStore(':memory:')
assert store.get('test') is None
def test_simple_write():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['test'] = "Bees"
with storage.transaction() as store:
assert store['test'] == "Bees"
def test_enumerate_keys():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo1'] = "Bees"
store['foo2'] = "Faces"
with storage.transaction() as store:
assert set(store.keys()) == set(('foo1', 'foo2'))
def test_update_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
store['foo'] = "Eyes"
with storage.transaction() as store:
assert store['foo'] == "Eyes"
def test_delete_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
del store['foo']
with storage.transaction() as store:
assert 'foo' not in store
def test_exceptions_back_out_writes():
storage = FileStore(':memory:')
try:
with storage.transaction() as store:
store['foo'] = "Blah"
raise RuntimeError()
except RuntimeError:
pass
with storage.transaction() as store:
assert 'foo' not in store
|
Add tests for file storage engine
|
Add tests for file storage engine
|
Python
|
mit
|
prophile/jacquard,prophile/jacquard
|
Add tests for file storage engine
|
from jacquard.storage.file import FileStore
def test_get_nonexistent_key():
# Just test this works without errors
store = FileStore(':memory:')
assert store.get('test') is None
def test_simple_write():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['test'] = "Bees"
with storage.transaction() as store:
assert store['test'] == "Bees"
def test_enumerate_keys():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo1'] = "Bees"
store['foo2'] = "Faces"
with storage.transaction() as store:
assert set(store.keys()) == set(('foo1', 'foo2'))
def test_update_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
store['foo'] = "Eyes"
with storage.transaction() as store:
assert store['foo'] == "Eyes"
def test_delete_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
del store['foo']
with storage.transaction() as store:
assert 'foo' not in store
def test_exceptions_back_out_writes():
storage = FileStore(':memory:')
try:
with storage.transaction() as store:
store['foo'] = "Blah"
raise RuntimeError()
except RuntimeError:
pass
with storage.transaction() as store:
assert 'foo' not in store
|
<commit_before><commit_msg>Add tests for file storage engine<commit_after>
|
from jacquard.storage.file import FileStore
def test_get_nonexistent_key():
# Just test this works without errors
store = FileStore(':memory:')
assert store.get('test') is None
def test_simple_write():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['test'] = "Bees"
with storage.transaction() as store:
assert store['test'] == "Bees"
def test_enumerate_keys():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo1'] = "Bees"
store['foo2'] = "Faces"
with storage.transaction() as store:
assert set(store.keys()) == set(('foo1', 'foo2'))
def test_update_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
store['foo'] = "Eyes"
with storage.transaction() as store:
assert store['foo'] == "Eyes"
def test_delete_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
del store['foo']
with storage.transaction() as store:
assert 'foo' not in store
def test_exceptions_back_out_writes():
storage = FileStore(':memory:')
try:
with storage.transaction() as store:
store['foo'] = "Blah"
raise RuntimeError()
except RuntimeError:
pass
with storage.transaction() as store:
assert 'foo' not in store
|
Add tests for file storage enginefrom jacquard.storage.file import FileStore
def test_get_nonexistent_key():
# Just test this works without errors
store = FileStore(':memory:')
assert store.get('test') is None
def test_simple_write():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['test'] = "Bees"
with storage.transaction() as store:
assert store['test'] == "Bees"
def test_enumerate_keys():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo1'] = "Bees"
store['foo2'] = "Faces"
with storage.transaction() as store:
assert set(store.keys()) == set(('foo1', 'foo2'))
def test_update_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
store['foo'] = "Eyes"
with storage.transaction() as store:
assert store['foo'] == "Eyes"
def test_delete_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
del store['foo']
with storage.transaction() as store:
assert 'foo' not in store
def test_exceptions_back_out_writes():
storage = FileStore(':memory:')
try:
with storage.transaction() as store:
store['foo'] = "Blah"
raise RuntimeError()
except RuntimeError:
pass
with storage.transaction() as store:
assert 'foo' not in store
|
<commit_before><commit_msg>Add tests for file storage engine<commit_after>from jacquard.storage.file import FileStore
def test_get_nonexistent_key():
# Just test this works without errors
store = FileStore(':memory:')
assert store.get('test') is None
def test_simple_write():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['test'] = "Bees"
with storage.transaction() as store:
assert store['test'] == "Bees"
def test_enumerate_keys():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo1'] = "Bees"
store['foo2'] = "Faces"
with storage.transaction() as store:
assert set(store.keys()) == set(('foo1', 'foo2'))
def test_update_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
store['foo'] = "Eyes"
with storage.transaction() as store:
assert store['foo'] == "Eyes"
def test_delete_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
del store['foo']
with storage.transaction() as store:
assert 'foo' not in store
def test_exceptions_back_out_writes():
storage = FileStore(':memory:')
try:
with storage.transaction() as store:
store['foo'] = "Blah"
raise RuntimeError()
except RuntimeError:
pass
with storage.transaction() as store:
assert 'foo' not in store
|
|
dc1d43acb5730bd9b555b63aa589b0eeceb14e52
|
test/stop-hook/TestStopHookCmd.py
|
test/stop-hook/TestStopHookCmd.py
|
"""
Test lldb target stop-hook command.
"""
import os
import unittest2
import lldb
import pexpect
from lldbtest import *
class StopHookCmdTestCase(TestBase):
mydir = "stop-hook"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test a sequence of target add-hook commands."""
self.buildDsym()
self.stop_hook_cmd_sequence()
def test_with_dwarf(self):
"""Test a sequence of target add-hook commands."""
self.buildDwarf()
self.stop_hook_cmd_sequence()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers inside main.cpp.
self.begl = line_number('main.cpp', '// Set breakpoint here to test target stop-hook.')
self.endl = line_number('main.cpp', '// End of the line range for which stop-hook is to be run.')
self.line = line_number('main.cpp', '// Another breakpoint which is outside of the stop-hook range.')
def stop_hook_cmd_sequence(self):
"""Test a sequence of target stop-hook commands."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.expect('breakpoint set -f main.cpp -l %d' % self.begl,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.cpp', line = %d" %
self.begl)
self.expect('breakpoint set -f main.cpp -l %d' % self.line,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 2: file ='main.cpp', line = %d" %
self.line)
self.runCmd("target stop-hook add -f main.cpp -l %d -e %d -o 'expr ptr'" % (self.begl, self.endl))
self.runCmd('target stop-hook list')
# Now run the program, expect to stop at the first breakpoint which is within the stop-hook range.
#self.expect('run', 'Stop hook fired',
# substrs = '** Stop Hooks **')
self.runCmd('run')
self.runCmd('thread step-over')
self.expect('thread step-over', 'Stop hook fired again',
substrs = '** Stop Hooks **')
# Now continue the inferior, we'll stop at another breakpoint which is outside the stop-hook range.
self.runCmd('process continue')
# Verify that the 'Stop Hooks' mechanism is NOT BEING fired off.
self.expect('thread step-over', 'Stop hook should not be fired', matching=False,
substrs = '** Stop Hooks **')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a test case to exercise the 'target stop-hook add' command without relying on pexpect to spawn an lldb child command. The test is not "correct" in that the '** Stop Hooks **' message emitted by the Target implementation is invoked asynchronously and is using a separate:
|
Add a test case to exercise the 'target stop-hook add' command without relying on pexpect
to spawn an lldb child command. The test is not "correct" in that the '** Stop Hooks **'
message emitted by the Target implementation is invoked asynchronously and is using a separate:
CommandReturnObject result;
command return object than what the driver passes to the normal command interpreter loop.
But it can help test our output serialization work.
I need to modify the test case later to maybe only test that the "-o 'expr ptr'" option does indeed work.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130742 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb
|
Add a test case to exercise the 'target stop-hook add' command without relying on pexpect
to spawn an lldb child command. The test is not "correct" in that the '** Stop Hooks **'
message emitted by the Target implementation is invoked asynchronously and is using a separate:
CommandReturnObject result;
command return object than what the driver passes to the normal command interpreter loop.
But it can help test our output serialization work.
I need to modify the test case later to maybe only test that the "-o 'expr ptr'" option does indeed work.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130742 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""
Test lldb target stop-hook command.
"""
import os
import unittest2
import lldb
import pexpect
from lldbtest import *
class StopHookCmdTestCase(TestBase):
mydir = "stop-hook"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test a sequence of target add-hook commands."""
self.buildDsym()
self.stop_hook_cmd_sequence()
def test_with_dwarf(self):
"""Test a sequence of target add-hook commands."""
self.buildDwarf()
self.stop_hook_cmd_sequence()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers inside main.cpp.
self.begl = line_number('main.cpp', '// Set breakpoint here to test target stop-hook.')
self.endl = line_number('main.cpp', '// End of the line range for which stop-hook is to be run.')
self.line = line_number('main.cpp', '// Another breakpoint which is outside of the stop-hook range.')
def stop_hook_cmd_sequence(self):
"""Test a sequence of target stop-hook commands."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.expect('breakpoint set -f main.cpp -l %d' % self.begl,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.cpp', line = %d" %
self.begl)
self.expect('breakpoint set -f main.cpp -l %d' % self.line,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 2: file ='main.cpp', line = %d" %
self.line)
self.runCmd("target stop-hook add -f main.cpp -l %d -e %d -o 'expr ptr'" % (self.begl, self.endl))
self.runCmd('target stop-hook list')
# Now run the program, expect to stop at the first breakpoint which is within the stop-hook range.
#self.expect('run', 'Stop hook fired',
# substrs = '** Stop Hooks **')
self.runCmd('run')
self.runCmd('thread step-over')
self.expect('thread step-over', 'Stop hook fired again',
substrs = '** Stop Hooks **')
# Now continue the inferior, we'll stop at another breakpoint which is outside the stop-hook range.
self.runCmd('process continue')
# Verify that the 'Stop Hooks' mechanism is NOT BEING fired off.
self.expect('thread step-over', 'Stop hook should not be fired', matching=False,
substrs = '** Stop Hooks **')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a test case to exercise the 'target stop-hook add' command without relying on pexpect
to spawn an lldb child command. The test is not "correct" in that the '** Stop Hooks **'
message emitted by the Target implementation is invoked asynchronously and is using a separate:
CommandReturnObject result;
command return object than what the driver passes to the normal command interpreter loop.
But it can help test our output serialization work.
I need to modify the test case later to maybe only test that the "-o 'expr ptr'" option does indeed work.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130742 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""
Test lldb target stop-hook command.
"""
import os
import unittest2
import lldb
import pexpect
from lldbtest import *
class StopHookCmdTestCase(TestBase):
mydir = "stop-hook"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test a sequence of target add-hook commands."""
self.buildDsym()
self.stop_hook_cmd_sequence()
def test_with_dwarf(self):
"""Test a sequence of target add-hook commands."""
self.buildDwarf()
self.stop_hook_cmd_sequence()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers inside main.cpp.
self.begl = line_number('main.cpp', '// Set breakpoint here to test target stop-hook.')
self.endl = line_number('main.cpp', '// End of the line range for which stop-hook is to be run.')
self.line = line_number('main.cpp', '// Another breakpoint which is outside of the stop-hook range.')
def stop_hook_cmd_sequence(self):
"""Test a sequence of target stop-hook commands."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.expect('breakpoint set -f main.cpp -l %d' % self.begl,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.cpp', line = %d" %
self.begl)
self.expect('breakpoint set -f main.cpp -l %d' % self.line,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 2: file ='main.cpp', line = %d" %
self.line)
self.runCmd("target stop-hook add -f main.cpp -l %d -e %d -o 'expr ptr'" % (self.begl, self.endl))
self.runCmd('target stop-hook list')
# Now run the program, expect to stop at the first breakpoint which is within the stop-hook range.
#self.expect('run', 'Stop hook fired',
# substrs = '** Stop Hooks **')
self.runCmd('run')
self.runCmd('thread step-over')
self.expect('thread step-over', 'Stop hook fired again',
substrs = '** Stop Hooks **')
# Now continue the inferior, we'll stop at another breakpoint which is outside the stop-hook range.
self.runCmd('process continue')
# Verify that the 'Stop Hooks' mechanism is NOT BEING fired off.
self.expect('thread step-over', 'Stop hook should not be fired', matching=False,
substrs = '** Stop Hooks **')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a test case to exercise the 'target stop-hook add' command without relying on pexpect
to spawn an lldb child command. The test is not "correct" in that the '** Stop Hooks **'
message emitted by the Target implementation is invoked asynchronously and is using a separate:
CommandReturnObject result;
command return object than what the driver passes to the normal command interpreter loop.
But it can help test our output serialization work.
I need to modify the test case later to maybe only test that the "-o 'expr ptr'" option does indeed work.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130742 91177308-0d34-0410-b5e6-96231b3b80d8"""
Test lldb target stop-hook command.
"""
import os
import unittest2
import lldb
import pexpect
from lldbtest import *
class StopHookCmdTestCase(TestBase):
mydir = "stop-hook"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test a sequence of target add-hook commands."""
self.buildDsym()
self.stop_hook_cmd_sequence()
def test_with_dwarf(self):
"""Test a sequence of target add-hook commands."""
self.buildDwarf()
self.stop_hook_cmd_sequence()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers inside main.cpp.
self.begl = line_number('main.cpp', '// Set breakpoint here to test target stop-hook.')
self.endl = line_number('main.cpp', '// End of the line range for which stop-hook is to be run.')
self.line = line_number('main.cpp', '// Another breakpoint which is outside of the stop-hook range.')
def stop_hook_cmd_sequence(self):
"""Test a sequence of target stop-hook commands."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.expect('breakpoint set -f main.cpp -l %d' % self.begl,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.cpp', line = %d" %
self.begl)
self.expect('breakpoint set -f main.cpp -l %d' % self.line,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 2: file ='main.cpp', line = %d" %
self.line)
self.runCmd("target stop-hook add -f main.cpp -l %d -e %d -o 'expr ptr'" % (self.begl, self.endl))
self.runCmd('target stop-hook list')
# Now run the program, expect to stop at the first breakpoint which is within the stop-hook range.
#self.expect('run', 'Stop hook fired',
# substrs = '** Stop Hooks **')
self.runCmd('run')
self.runCmd('thread step-over')
self.expect('thread step-over', 'Stop hook fired again',
substrs = '** Stop Hooks **')
# Now continue the inferior, we'll stop at another breakpoint which is outside the stop-hook range.
self.runCmd('process continue')
# Verify that the 'Stop Hooks' mechanism is NOT BEING fired off.
self.expect('thread step-over', 'Stop hook should not be fired', matching=False,
substrs = '** Stop Hooks **')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a test case to exercise the 'target stop-hook add' command without relying on pexpect
to spawn an lldb child command. The test is not "correct" in that the '** Stop Hooks **'
message emitted by the Target implementation is invoked asynchronously and is using a separate:
CommandReturnObject result;
command return object than what the driver passes to the normal command interpreter loop.
But it can help test our output serialization work.
I need to modify the test case later to maybe only test that the "-o 'expr ptr'" option does indeed work.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130742 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""
Test lldb target stop-hook command.
"""
import os
import unittest2
import lldb
import pexpect
from lldbtest import *
class StopHookCmdTestCase(TestBase):
mydir = "stop-hook"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test a sequence of target add-hook commands."""
self.buildDsym()
self.stop_hook_cmd_sequence()
def test_with_dwarf(self):
"""Test a sequence of target add-hook commands."""
self.buildDwarf()
self.stop_hook_cmd_sequence()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers inside main.cpp.
self.begl = line_number('main.cpp', '// Set breakpoint here to test target stop-hook.')
self.endl = line_number('main.cpp', '// End of the line range for which stop-hook is to be run.')
self.line = line_number('main.cpp', '// Another breakpoint which is outside of the stop-hook range.')
def stop_hook_cmd_sequence(self):
"""Test a sequence of target stop-hook commands."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.expect('breakpoint set -f main.cpp -l %d' % self.begl,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.cpp', line = %d" %
self.begl)
self.expect('breakpoint set -f main.cpp -l %d' % self.line,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 2: file ='main.cpp', line = %d" %
self.line)
self.runCmd("target stop-hook add -f main.cpp -l %d -e %d -o 'expr ptr'" % (self.begl, self.endl))
self.runCmd('target stop-hook list')
# Now run the program, expect to stop at the first breakpoint which is within the stop-hook range.
#self.expect('run', 'Stop hook fired',
# substrs = '** Stop Hooks **')
self.runCmd('run')
self.runCmd('thread step-over')
self.expect('thread step-over', 'Stop hook fired again',
substrs = '** Stop Hooks **')
# Now continue the inferior, we'll stop at another breakpoint which is outside the stop-hook range.
self.runCmd('process continue')
# Verify that the 'Stop Hooks' mechanism is NOT BEING fired off.
self.expect('thread step-over', 'Stop hook should not be fired', matching=False,
substrs = '** Stop Hooks **')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
|
250e0d2d0e2264b83a82548df3b30dbc784a4fe5
|
docker-registry-show.py
|
docker-registry-show.py
|
"""
Copyright 2015 Red Hat, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import argparse
from docker_registry_client import DockerRegistryClient
import logging
import requests
class CLI(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
excl_group = self.parser.add_mutually_exclusive_group()
excl_group.add_argument("-q", "--quiet", action="store_true")
excl_group.add_argument("-v", "--verbose", action="store_true")
self.parser.add_argument('--verify-ssl', dest='verify_ssl',
action='store_true')
self.parser.add_argument('--no-verify-ssl', dest='verify_ssl',
action='store_false')
self.parser.add_argument('registry', metavar='REGISTRY', nargs=1,
help='registry URL (including scheme)')
self.parser.add_argument('repository', metavar='REPOSITORY', nargs='?')
self.parser.set_defaults(verify_ssl=True)
def run(self):
args = self.parser.parse_args()
basic_config_args = {}
if args.verbose:
basic_config_args['level'] = logging.DEBUG
elif args.quiet:
basic_config_args['level'] = logging.WARNING
logging.basicConfig(**basic_config_args)
client = DockerRegistryClient(args.registry[0],
verify_ssl=args.verify_ssl)
if args.repository:
self.show_tags(client, args.repository)
else:
self.show_repositories(client)
def show_repositories(self, client):
try:
repositories = client.repositories()
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Catalog/Search not supported")
else:
raise
else:
print("Repositories:")
for repository in repositories.keys():
print(" - {0}".format(repository))
def show_tags(self, client, repository):
try:
repo = client.repository(repository)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Repository {0} not found".format(repository))
else:
raise
else:
print("Tags in repository {0}:".format(repository))
for tag in repo.tags():
print(" - {0}".format(tag))
if __name__ == '__main__':
try:
cli = CLI()
cli.run()
except KeyboardInterrupt:
pass
|
Add some example client code
|
Add some example client code
|
Python
|
apache-2.0
|
twaugh/docker-registry-client,yodle/docker-registry-client
|
Add some example client code
|
"""
Copyright 2015 Red Hat, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import argparse
from docker_registry_client import DockerRegistryClient
import logging
import requests
class CLI(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
excl_group = self.parser.add_mutually_exclusive_group()
excl_group.add_argument("-q", "--quiet", action="store_true")
excl_group.add_argument("-v", "--verbose", action="store_true")
self.parser.add_argument('--verify-ssl', dest='verify_ssl',
action='store_true')
self.parser.add_argument('--no-verify-ssl', dest='verify_ssl',
action='store_false')
self.parser.add_argument('registry', metavar='REGISTRY', nargs=1,
help='registry URL (including scheme)')
self.parser.add_argument('repository', metavar='REPOSITORY', nargs='?')
self.parser.set_defaults(verify_ssl=True)
def run(self):
args = self.parser.parse_args()
basic_config_args = {}
if args.verbose:
basic_config_args['level'] = logging.DEBUG
elif args.quiet:
basic_config_args['level'] = logging.WARNING
logging.basicConfig(**basic_config_args)
client = DockerRegistryClient(args.registry[0],
verify_ssl=args.verify_ssl)
if args.repository:
self.show_tags(client, args.repository)
else:
self.show_repositories(client)
def show_repositories(self, client):
try:
repositories = client.repositories()
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Catalog/Search not supported")
else:
raise
else:
print("Repositories:")
for repository in repositories.keys():
print(" - {0}".format(repository))
def show_tags(self, client, repository):
try:
repo = client.repository(repository)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Repository {0} not found".format(repository))
else:
raise
else:
print("Tags in repository {0}:".format(repository))
for tag in repo.tags():
print(" - {0}".format(tag))
if __name__ == '__main__':
try:
cli = CLI()
cli.run()
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add some example client code<commit_after>
|
"""
Copyright 2015 Red Hat, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import argparse
from docker_registry_client import DockerRegistryClient
import logging
import requests
class CLI(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
excl_group = self.parser.add_mutually_exclusive_group()
excl_group.add_argument("-q", "--quiet", action="store_true")
excl_group.add_argument("-v", "--verbose", action="store_true")
self.parser.add_argument('--verify-ssl', dest='verify_ssl',
action='store_true')
self.parser.add_argument('--no-verify-ssl', dest='verify_ssl',
action='store_false')
self.parser.add_argument('registry', metavar='REGISTRY', nargs=1,
help='registry URL (including scheme)')
self.parser.add_argument('repository', metavar='REPOSITORY', nargs='?')
self.parser.set_defaults(verify_ssl=True)
def run(self):
args = self.parser.parse_args()
basic_config_args = {}
if args.verbose:
basic_config_args['level'] = logging.DEBUG
elif args.quiet:
basic_config_args['level'] = logging.WARNING
logging.basicConfig(**basic_config_args)
client = DockerRegistryClient(args.registry[0],
verify_ssl=args.verify_ssl)
if args.repository:
self.show_tags(client, args.repository)
else:
self.show_repositories(client)
def show_repositories(self, client):
try:
repositories = client.repositories()
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Catalog/Search not supported")
else:
raise
else:
print("Repositories:")
for repository in repositories.keys():
print(" - {0}".format(repository))
def show_tags(self, client, repository):
try:
repo = client.repository(repository)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Repository {0} not found".format(repository))
else:
raise
else:
print("Tags in repository {0}:".format(repository))
for tag in repo.tags():
print(" - {0}".format(tag))
if __name__ == '__main__':
try:
cli = CLI()
cli.run()
except KeyboardInterrupt:
pass
|
Add some example client code"""
Copyright 2015 Red Hat, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import argparse
from docker_registry_client import DockerRegistryClient
import logging
import requests
class CLI(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
excl_group = self.parser.add_mutually_exclusive_group()
excl_group.add_argument("-q", "--quiet", action="store_true")
excl_group.add_argument("-v", "--verbose", action="store_true")
self.parser.add_argument('--verify-ssl', dest='verify_ssl',
action='store_true')
self.parser.add_argument('--no-verify-ssl', dest='verify_ssl',
action='store_false')
self.parser.add_argument('registry', metavar='REGISTRY', nargs=1,
help='registry URL (including scheme)')
self.parser.add_argument('repository', metavar='REPOSITORY', nargs='?')
self.parser.set_defaults(verify_ssl=True)
def run(self):
args = self.parser.parse_args()
basic_config_args = {}
if args.verbose:
basic_config_args['level'] = logging.DEBUG
elif args.quiet:
basic_config_args['level'] = logging.WARNING
logging.basicConfig(**basic_config_args)
client = DockerRegistryClient(args.registry[0],
verify_ssl=args.verify_ssl)
if args.repository:
self.show_tags(client, args.repository)
else:
self.show_repositories(client)
def show_repositories(self, client):
try:
repositories = client.repositories()
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Catalog/Search not supported")
else:
raise
else:
print("Repositories:")
for repository in repositories.keys():
print(" - {0}".format(repository))
def show_tags(self, client, repository):
try:
repo = client.repository(repository)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Repository {0} not found".format(repository))
else:
raise
else:
print("Tags in repository {0}:".format(repository))
for tag in repo.tags():
print(" - {0}".format(tag))
if __name__ == '__main__':
try:
cli = CLI()
cli.run()
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add some example client code<commit_after>"""
Copyright 2015 Red Hat, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import argparse
from docker_registry_client import DockerRegistryClient
import logging
import requests
class CLI(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
excl_group = self.parser.add_mutually_exclusive_group()
excl_group.add_argument("-q", "--quiet", action="store_true")
excl_group.add_argument("-v", "--verbose", action="store_true")
self.parser.add_argument('--verify-ssl', dest='verify_ssl',
action='store_true')
self.parser.add_argument('--no-verify-ssl', dest='verify_ssl',
action='store_false')
self.parser.add_argument('registry', metavar='REGISTRY', nargs=1,
help='registry URL (including scheme)')
self.parser.add_argument('repository', metavar='REPOSITORY', nargs='?')
self.parser.set_defaults(verify_ssl=True)
def run(self):
args = self.parser.parse_args()
basic_config_args = {}
if args.verbose:
basic_config_args['level'] = logging.DEBUG
elif args.quiet:
basic_config_args['level'] = logging.WARNING
logging.basicConfig(**basic_config_args)
client = DockerRegistryClient(args.registry[0],
verify_ssl=args.verify_ssl)
if args.repository:
self.show_tags(client, args.repository)
else:
self.show_repositories(client)
def show_repositories(self, client):
try:
repositories = client.repositories()
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Catalog/Search not supported")
else:
raise
else:
print("Repositories:")
for repository in repositories.keys():
print(" - {0}".format(repository))
def show_tags(self, client, repository):
try:
repo = client.repository(repository)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Repository {0} not found".format(repository))
else:
raise
else:
print("Tags in repository {0}:".format(repository))
for tag in repo.tags():
print(" - {0}".format(tag))
if __name__ == '__main__':
try:
cli = CLI()
cli.run()
except KeyboardInterrupt:
pass
|
|
83bcb62c98c406e2aa6ce6a9a98750d0b565f750
|
tests/unit/test_raw_generichash.py
|
tests/unit/test_raw_generichash.py
|
# Import nacl libs
import libnacl
# Import python libs
import unittest
class TestGenericHash(unittest.TestCase):
'''
Test generic hash functions
'''
def test_keyless_generichash(self):
msg1 = b'Are you suggesting coconuts migrate?'
msg2 = b'Not at all, they could be carried.'
chash1 = libnacl.crypto_generichash(msg1)
chash2 = libnacl.crypto_generichash(msg2)
self.assertNotEqual(msg1, chash1)
self.assertNotEqual(msg2, chash2)
self.assertNotEqual(chash2, chash1)
|
Add tests for generic hash
|
Add tests for generic hash
|
Python
|
apache-2.0
|
mindw/libnacl,johnttan/libnacl,cachedout/libnacl,coinkite/libnacl,saltstack/libnacl,RaetProtocol/libnacl
|
Add tests for generic hash
|
# Import nacl libs
import libnacl
# Import python libs
import unittest
class TestGenericHash(unittest.TestCase):
'''
Test generic hash functions
'''
def test_keyless_generichash(self):
msg1 = b'Are you suggesting coconuts migrate?'
msg2 = b'Not at all, they could be carried.'
chash1 = libnacl.crypto_generichash(msg1)
chash2 = libnacl.crypto_generichash(msg2)
self.assertNotEqual(msg1, chash1)
self.assertNotEqual(msg2, chash2)
self.assertNotEqual(chash2, chash1)
|
<commit_before><commit_msg>Add tests for generic hash<commit_after>
|
# Import nacl libs
import libnacl
# Import python libs
import unittest
class TestGenericHash(unittest.TestCase):
'''
Test generic hash functions
'''
def test_keyless_generichash(self):
msg1 = b'Are you suggesting coconuts migrate?'
msg2 = b'Not at all, they could be carried.'
chash1 = libnacl.crypto_generichash(msg1)
chash2 = libnacl.crypto_generichash(msg2)
self.assertNotEqual(msg1, chash1)
self.assertNotEqual(msg2, chash2)
self.assertNotEqual(chash2, chash1)
|
Add tests for generic hash# Import nacl libs
import libnacl
# Import python libs
import unittest
class TestGenericHash(unittest.TestCase):
'''
Test generic hash functions
'''
def test_keyless_generichash(self):
msg1 = b'Are you suggesting coconuts migrate?'
msg2 = b'Not at all, they could be carried.'
chash1 = libnacl.crypto_generichash(msg1)
chash2 = libnacl.crypto_generichash(msg2)
self.assertNotEqual(msg1, chash1)
self.assertNotEqual(msg2, chash2)
self.assertNotEqual(chash2, chash1)
|
<commit_before><commit_msg>Add tests for generic hash<commit_after># Import nacl libs
import libnacl
# Import python libs
import unittest
class TestGenericHash(unittest.TestCase):
'''
Test generic hash functions
'''
def test_keyless_generichash(self):
msg1 = b'Are you suggesting coconuts migrate?'
msg2 = b'Not at all, they could be carried.'
chash1 = libnacl.crypto_generichash(msg1)
chash2 = libnacl.crypto_generichash(msg2)
self.assertNotEqual(msg1, chash1)
self.assertNotEqual(msg2, chash2)
self.assertNotEqual(chash2, chash1)
|
|
064124d09973dc58a444d22aa7c47acf94f8fa81
|
data/bigramfreq.py
|
data/bigramfreq.py
|
import json
import lxml.html
from lxml.cssselect import CSSSelector
import requests
import sys
def main():
raw = requests.get("http://norvig.com/mayzner.html")
if not raw:
print >>sys.stderr, "Request failed with code %d" % (raw.status_code)
return 1
tree = lxml.html.fromstring(raw.text)
sel = CSSSelector("td")
freq = {key[:-1].lower(): float(value[:-2]) / 100 for key, value, _ in map(lambda x: x.get("title").split(), filter(lambda y: y.get("title") is not None, sel(tree)))}
print json.dumps(freq)
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add a script to generate JSON bigram frequencies for English
|
Add a script to generate JSON bigram frequencies for English
|
Python
|
apache-2.0
|
Kitware/clique,Kitware/clique,XDATA-Year-3/clique,XDATA-Year-3/clique,Kitware/clique,XDATA-Year-3/clique
|
Add a script to generate JSON bigram frequencies for English
|
import json
import lxml.html
from lxml.cssselect import CSSSelector
import requests
import sys
def main():
raw = requests.get("http://norvig.com/mayzner.html")
if not raw:
print >>sys.stderr, "Request failed with code %d" % (raw.status_code)
return 1
tree = lxml.html.fromstring(raw.text)
sel = CSSSelector("td")
freq = {key[:-1].lower(): float(value[:-2]) / 100 for key, value, _ in map(lambda x: x.get("title").split(), filter(lambda y: y.get("title") is not None, sel(tree)))}
print json.dumps(freq)
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add a script to generate JSON bigram frequencies for English<commit_after>
|
import json
import lxml.html
from lxml.cssselect import CSSSelector
import requests
import sys
def main():
raw = requests.get("http://norvig.com/mayzner.html")
if not raw:
print >>sys.stderr, "Request failed with code %d" % (raw.status_code)
return 1
tree = lxml.html.fromstring(raw.text)
sel = CSSSelector("td")
freq = {key[:-1].lower(): float(value[:-2]) / 100 for key, value, _ in map(lambda x: x.get("title").split(), filter(lambda y: y.get("title") is not None, sel(tree)))}
print json.dumps(freq)
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add a script to generate JSON bigram frequencies for Englishimport json
import lxml.html
from lxml.cssselect import CSSSelector
import requests
import sys
def main():
raw = requests.get("http://norvig.com/mayzner.html")
if not raw:
print >>sys.stderr, "Request failed with code %d" % (raw.status_code)
return 1
tree = lxml.html.fromstring(raw.text)
sel = CSSSelector("td")
freq = {key[:-1].lower(): float(value[:-2]) / 100 for key, value, _ in map(lambda x: x.get("title").split(), filter(lambda y: y.get("title") is not None, sel(tree)))}
print json.dumps(freq)
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add a script to generate JSON bigram frequencies for English<commit_after>import json
import lxml.html
from lxml.cssselect import CSSSelector
import requests
import sys
def main():
raw = requests.get("http://norvig.com/mayzner.html")
if not raw:
print >>sys.stderr, "Request failed with code %d" % (raw.status_code)
return 1
tree = lxml.html.fromstring(raw.text)
sel = CSSSelector("td")
freq = {key[:-1].lower(): float(value[:-2]) / 100 for key, value, _ in map(lambda x: x.get("title").split(), filter(lambda y: y.get("title") is not None, sel(tree)))}
print json.dumps(freq)
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
75131bdf806c56970f3160de3e6d476d9ecbc3a7
|
python/deleteNodeInALinkedList.py
|
python/deleteNodeInALinkedList.py
|
# https://leetcode.com/problems/delete-node-in-a-linked-list/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
while node.next != None:
node.val = node.next.val
if node.next.next is None:
node.next = None
else:
node = node.next
|
Add problem delete node in a linked list
|
Add problem delete node in a linked list
|
Python
|
mit
|
guozengxin/myleetcode,guozengxin/myleetcode
|
Add problem delete node in a linked list
|
# https://leetcode.com/problems/delete-node-in-a-linked-list/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
while node.next != None:
node.val = node.next.val
if node.next.next is None:
node.next = None
else:
node = node.next
|
<commit_before><commit_msg>Add problem delete node in a linked list<commit_after>
|
# https://leetcode.com/problems/delete-node-in-a-linked-list/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
while node.next != None:
node.val = node.next.val
if node.next.next is None:
node.next = None
else:
node = node.next
|
Add problem delete node in a linked list# https://leetcode.com/problems/delete-node-in-a-linked-list/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
while node.next != None:
node.val = node.next.val
if node.next.next is None:
node.next = None
else:
node = node.next
|
<commit_before><commit_msg>Add problem delete node in a linked list<commit_after># https://leetcode.com/problems/delete-node-in-a-linked-list/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
while node.next != None:
node.val = node.next.val
if node.next.next is None:
node.next = None
else:
node = node.next
|
|
1ede9bd211cd8ea6aac4db6f8818804cb778a022
|
dinosaurs/views.py
|
dinosaurs/views.py
|
import os
import tornado.web
import tornado.ioloop
class SingleStatic(tornado.web.StaticFileHandler):
def initialize(self, path):
self.dirname, self.filename = os.path.split(path)
super(SingleStatic, self).initialize(self.dirname)
def get(self, path=None, include_body=True):
super(SingleStatic, self).get(self.filename, include_body)
|
Add a view that serves a single static file
|
Add a view that serves a single static file
|
Python
|
mit
|
chrisseto/dinosaurs.sexy,chrisseto/dinosaurs.sexy
|
Add a view that serves a single static file
|
import os
import tornado.web
import tornado.ioloop
class SingleStatic(tornado.web.StaticFileHandler):
def initialize(self, path):
self.dirname, self.filename = os.path.split(path)
super(SingleStatic, self).initialize(self.dirname)
def get(self, path=None, include_body=True):
super(SingleStatic, self).get(self.filename, include_body)
|
<commit_before><commit_msg>Add a view that serves a single static file<commit_after>
|
import os
import tornado.web
import tornado.ioloop
class SingleStatic(tornado.web.StaticFileHandler):
def initialize(self, path):
self.dirname, self.filename = os.path.split(path)
super(SingleStatic, self).initialize(self.dirname)
def get(self, path=None, include_body=True):
super(SingleStatic, self).get(self.filename, include_body)
|
Add a view that serves a single static fileimport os
import tornado.web
import tornado.ioloop
class SingleStatic(tornado.web.StaticFileHandler):
def initialize(self, path):
self.dirname, self.filename = os.path.split(path)
super(SingleStatic, self).initialize(self.dirname)
def get(self, path=None, include_body=True):
super(SingleStatic, self).get(self.filename, include_body)
|
<commit_before><commit_msg>Add a view that serves a single static file<commit_after>import os
import tornado.web
import tornado.ioloop
class SingleStatic(tornado.web.StaticFileHandler):
def initialize(self, path):
self.dirname, self.filename = os.path.split(path)
super(SingleStatic, self).initialize(self.dirname)
def get(self, path=None, include_body=True):
super(SingleStatic, self).get(self.filename, include_body)
|
|
06e7dd815a77739089b2ad0aed5cb9f01a194967
|
Normalize_Image.py
|
Normalize_Image.py
|
# @Dataset data
# @OpService ops
# @OUTPUT Img normalized
# Create a normalized copy of the image in the [0, 1] range.
#
# Stefan Helfrich (University of Konstanz), 03/10/2016
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.type.numeric.integer import ByteType
from net.imagej.ops import Ops
normalized = ops.create().imgPlus(data.getImgPlus(), data.getImgPlus());
normalized.setName("normalized");
normalizeOp = ops.op(Ops.Image.Normalize, normalized, data.getImgPlus(), None, None, FloatType(0.0), FloatType(1.0));
ops.slicewise(normalized, data.getImgPlus(), normalizeOp, [0,1], False);
|
Add script to normalize image using Ops
|
Add script to normalize image using Ops
|
Python
|
bsd-2-clause
|
bic-kn/imagej-scripts
|
Add script to normalize image using Ops
|
# @Dataset data
# @OpService ops
# @OUTPUT Img normalized
# Create a normalized copy of the image in the [0, 1] range.
#
# Stefan Helfrich (University of Konstanz), 03/10/2016
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.type.numeric.integer import ByteType
from net.imagej.ops import Ops
normalized = ops.create().imgPlus(data.getImgPlus(), data.getImgPlus());
normalized.setName("normalized");
normalizeOp = ops.op(Ops.Image.Normalize, normalized, data.getImgPlus(), None, None, FloatType(0.0), FloatType(1.0));
ops.slicewise(normalized, data.getImgPlus(), normalizeOp, [0,1], False);
|
<commit_before><commit_msg>Add script to normalize image using Ops<commit_after>
|
# @Dataset data
# @OpService ops
# @OUTPUT Img normalized
# Create a normalized copy of the image in the [0, 1] range.
#
# Stefan Helfrich (University of Konstanz), 03/10/2016
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.type.numeric.integer import ByteType
from net.imagej.ops import Ops
normalized = ops.create().imgPlus(data.getImgPlus(), data.getImgPlus());
normalized.setName("normalized");
normalizeOp = ops.op(Ops.Image.Normalize, normalized, data.getImgPlus(), None, None, FloatType(0.0), FloatType(1.0));
ops.slicewise(normalized, data.getImgPlus(), normalizeOp, [0,1], False);
|
Add script to normalize image using Ops# @Dataset data
# @OpService ops
# @OUTPUT Img normalized
# Create a normalized copy of the image in the [0, 1] range.
#
# Stefan Helfrich (University of Konstanz), 03/10/2016
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.type.numeric.integer import ByteType
from net.imagej.ops import Ops
normalized = ops.create().imgPlus(data.getImgPlus(), data.getImgPlus());
normalized.setName("normalized");
normalizeOp = ops.op(Ops.Image.Normalize, normalized, data.getImgPlus(), None, None, FloatType(0.0), FloatType(1.0));
ops.slicewise(normalized, data.getImgPlus(), normalizeOp, [0,1], False);
|
<commit_before><commit_msg>Add script to normalize image using Ops<commit_after># @Dataset data
# @OpService ops
# @OUTPUT Img normalized
# Create a normalized copy of the image in the [0, 1] range.
#
# Stefan Helfrich (University of Konstanz), 03/10/2016
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.type.numeric.integer import ByteType
from net.imagej.ops import Ops
normalized = ops.create().imgPlus(data.getImgPlus(), data.getImgPlus());
normalized.setName("normalized");
normalizeOp = ops.op(Ops.Image.Normalize, normalized, data.getImgPlus(), None, None, FloatType(0.0), FloatType(1.0));
ops.slicewise(normalized, data.getImgPlus(), normalizeOp, [0,1], False);
|
|
6d4efa0bd1199bbe900a8913b829ca7201dde6ab
|
openedx/core/djangoapps/appsembler/sites/migrations/0003_add_juniper_new_sass_vars.py
|
openedx/core/djangoapps/appsembler/sites/migrations/0003_add_juniper_new_sass_vars.py
|
# -*- coding: utf-8 -*-
import json
from django.db import migrations, models
def add_juniper_new_sass_vars(apps, schema_editor):
"""
This migration adds all the new SASS variables added during the initial
pass of the Tahoe Juniper release upgrade.
"""
new_sass_var_keys = {
"$base-container-width": "calcRem(1200)",
"$base-learning-container-width": "calcRem(1000)",
"$courseware-content-container-side-padding": "calcRem(100)",
"$courseware-content-container-sidebar-width": "calcRem(240)",
"$courseware-content-container-width": "$base-learning-container-width",
"$site-nav-width": "$base-container-width",
"$inline-link-color": "$brand-primary-color",
"$light-border-color": "#dedede",
"$font-size-base-courseware": "calcRem(18)",
"$line-height-base-courseware": "200%",
"$in-app-container-border-radius": "calcRem(15)",
"$login-register-container-width": "calcRem(480)",
}
SiteConfiguration = apps.get_model('site_configuration', 'SiteConfiguration')
sites = SiteConfiguration.objects.all()
for site in sites:
for sass_var, sass_value in new_sass_var_keys.items():
exists = False
for key, val in site.sass_variables:
if key == sass_var:
exists = True
break
if not exists:
site.sass_variables.append([sass_var, [sass_value, sass_value]])
site.save()
class Migration(migrations.Migration):
dependencies = [
('appsembler_sites', '0001_initial'),
('appsembler_sites', '0002_add_hide_linked_accounts_sass_var'),
('site_configuration', '0004_auto_20161120_2325'),
]
operations = [
migrations.RunPython(add_juniper_new_sass_vars),
]
|
Add migration to add new Juniper SASS vars to sites
|
Add migration to add new Juniper SASS vars to sites
fix bug
|
Python
|
agpl-3.0
|
appsembler/edx-platform,appsembler/edx-platform,appsembler/edx-platform,appsembler/edx-platform
|
Add migration to add new Juniper SASS vars to sites
fix bug
|
# -*- coding: utf-8 -*-
import json
from django.db import migrations, models
def add_juniper_new_sass_vars(apps, schema_editor):
"""
This migration adds all the new SASS variables added during the initial
pass of the Tahoe Juniper release upgrade.
"""
new_sass_var_keys = {
"$base-container-width": "calcRem(1200)",
"$base-learning-container-width": "calcRem(1000)",
"$courseware-content-container-side-padding": "calcRem(100)",
"$courseware-content-container-sidebar-width": "calcRem(240)",
"$courseware-content-container-width": "$base-learning-container-width",
"$site-nav-width": "$base-container-width",
"$inline-link-color": "$brand-primary-color",
"$light-border-color": "#dedede",
"$font-size-base-courseware": "calcRem(18)",
"$line-height-base-courseware": "200%",
"$in-app-container-border-radius": "calcRem(15)",
"$login-register-container-width": "calcRem(480)",
}
SiteConfiguration = apps.get_model('site_configuration', 'SiteConfiguration')
sites = SiteConfiguration.objects.all()
for site in sites:
for sass_var, sass_value in new_sass_var_keys.items():
exists = False
for key, val in site.sass_variables:
if key == sass_var:
exists = True
break
if not exists:
site.sass_variables.append([sass_var, [sass_value, sass_value]])
site.save()
class Migration(migrations.Migration):
dependencies = [
('appsembler_sites', '0001_initial'),
('appsembler_sites', '0002_add_hide_linked_accounts_sass_var'),
('site_configuration', '0004_auto_20161120_2325'),
]
operations = [
migrations.RunPython(add_juniper_new_sass_vars),
]
|
<commit_before><commit_msg>Add migration to add new Juniper SASS vars to sites
fix bug<commit_after>
|
# -*- coding: utf-8 -*-
import json
from django.db import migrations, models
def add_juniper_new_sass_vars(apps, schema_editor):
"""
    This migration adds all the new SASS variables added during the initial
pass of the Tahoe Juniper release upgrade.
"""
new_sass_var_keys = {
"$base-container-width": "calcRem(1200)",
"$base-learning-container-width": "calcRem(1000)",
"$courseware-content-container-side-padding": "calcRem(100)",
"$courseware-content-container-sidebar-width": "calcRem(240)",
"$courseware-content-container-width": "$base-learning-container-width",
"$site-nav-width": "$base-container-width",
"$inline-link-color": "$brand-primary-color",
"$light-border-color": "#dedede",
"$font-size-base-courseware": "calcRem(18)",
"$line-height-base-courseware": "200%",
"$in-app-container-border-radius": "calcRem(15)",
"$login-register-container-width": "calcRem(480)",
}
SiteConfiguration = apps.get_model('site_configuration', 'SiteConfiguration')
sites = SiteConfiguration.objects.all()
for site in sites:
for sass_var, sass_value in new_sass_var_keys.items():
exists = False
for key, val in site.sass_variables:
if key == sass_var:
exists = True
break
if not exists:
site.sass_variables.append([sass_var, [sass_value, sass_value]])
site.save()
class Migration(migrations.Migration):
dependencies = [
('appsembler_sites', '0001_initial'),
('appsembler_sites', '0002_add_hide_linked_accounts_sass_var'),
('site_configuration', '0004_auto_20161120_2325'),
]
operations = [
migrations.RunPython(add_juniper_new_sass_vars),
]
|
Add migration to add new Juniper SASS vars to sites
fix bug# -*- coding: utf-8 -*-
import json
from django.db import migrations, models
def add_juniper_new_sass_vars(apps, schema_editor):
"""
    This migration adds all the new SASS variables added during the initial
pass of the Tahoe Juniper release upgrade.
"""
new_sass_var_keys = {
"$base-container-width": "calcRem(1200)",
"$base-learning-container-width": "calcRem(1000)",
"$courseware-content-container-side-padding": "calcRem(100)",
"$courseware-content-container-sidebar-width": "calcRem(240)",
"$courseware-content-container-width": "$base-learning-container-width",
"$site-nav-width": "$base-container-width",
"$inline-link-color": "$brand-primary-color",
"$light-border-color": "#dedede",
"$font-size-base-courseware": "calcRem(18)",
"$line-height-base-courseware": "200%",
"$in-app-container-border-radius": "calcRem(15)",
"$login-register-container-width": "calcRem(480)",
}
SiteConfiguration = apps.get_model('site_configuration', 'SiteConfiguration')
sites = SiteConfiguration.objects.all()
for site in sites:
for sass_var, sass_value in new_sass_var_keys.items():
exists = False
for key, val in site.sass_variables:
if key == sass_var:
exists = True
break
if not exists:
site.sass_variables.append([sass_var, [sass_value, sass_value]])
site.save()
class Migration(migrations.Migration):
dependencies = [
('appsembler_sites', '0001_initial'),
('appsembler_sites', '0002_add_hide_linked_accounts_sass_var'),
('site_configuration', '0004_auto_20161120_2325'),
]
operations = [
migrations.RunPython(add_juniper_new_sass_vars),
]
|
<commit_before><commit_msg>Add migration to add new Juniper SASS vars to sites
fix bug<commit_after># -*- coding: utf-8 -*-
import json
from django.db import migrations, models
def add_juniper_new_sass_vars(apps, schema_editor):
"""
    This migration adds all the new SASS variables added during the initial
pass of the Tahoe Juniper release upgrade.
"""
new_sass_var_keys = {
"$base-container-width": "calcRem(1200)",
"$base-learning-container-width": "calcRem(1000)",
"$courseware-content-container-side-padding": "calcRem(100)",
"$courseware-content-container-sidebar-width": "calcRem(240)",
"$courseware-content-container-width": "$base-learning-container-width",
"$site-nav-width": "$base-container-width",
"$inline-link-color": "$brand-primary-color",
"$light-border-color": "#dedede",
"$font-size-base-courseware": "calcRem(18)",
"$line-height-base-courseware": "200%",
"$in-app-container-border-radius": "calcRem(15)",
"$login-register-container-width": "calcRem(480)",
}
SiteConfiguration = apps.get_model('site_configuration', 'SiteConfiguration')
sites = SiteConfiguration.objects.all()
for site in sites:
for sass_var, sass_value in new_sass_var_keys.items():
exists = False
for key, val in site.sass_variables:
if key == sass_var:
exists = True
break
if not exists:
site.sass_variables.append([sass_var, [sass_value, sass_value]])
site.save()
class Migration(migrations.Migration):
dependencies = [
('appsembler_sites', '0001_initial'),
('appsembler_sites', '0002_add_hide_linked_accounts_sass_var'),
('site_configuration', '0004_auto_20161120_2325'),
]
operations = [
migrations.RunPython(add_juniper_new_sass_vars),
]
|
|
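A minimal sketch (made-up values, not part of the commit above) of the data shape that migration relies on: site.sass_variables is treated as a list of [name, [value, value]] pairs, and a pair is appended only when its name is not already present.

existing = [["$brand-primary-color", ["#0090c1", "#0090c1"]]]
new_pair = ["$base-container-width", ["calcRem(1200)", "calcRem(1200)"]]
if not any(name == new_pair[0] for name, _ in existing):
    existing.append(new_pair)  # re-running this check leaves the list unchanged
print(existing)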
190df1378844c6294c6f48ad6cb0272f2146fc48
|
examples/force_https.py
|
examples/force_https.py
|
"""An example of using a middleware to require HTTPS connections.
requires https://github.com/falconry/falcon-require-https to be installed via
pip install falcon-require-https
"""
import hug
from falcon_require_https import RequireHTTPS
hug.API(__name__).http.add_middleware(RequireHTTPS())
@hug.get()
def my_endpoint():
return 'Success!'
|
Add example of force https
|
Add example of force https
|
Python
|
mit
|
timothycrosley/hug,MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug
|
Add example of force https
|
"""An example of using a middleware to require HTTPS connections.
requires https://github.com/falconry/falcon-require-https to be installed via
pip install falcon-require-https
"""
import hug
from falcon_require_https import RequireHTTPS
hug.API(__name__).http.add_middleware(RequireHTTPS())
@hug.get()
def my_endpoint():
return 'Success!'
|
<commit_before><commit_msg>Add example of force https<commit_after>
|
"""An example of using a middleware to require HTTPS connections.
requires https://github.com/falconry/falcon-require-https to be installed via
pip install falcon-require-https
"""
import hug
from falcon_require_https import RequireHTTPS
hug.API(__name__).http.add_middleware(RequireHTTPS())
@hug.get()
def my_endpoint():
return 'Success!'
|
Add example of force https"""An example of using a middleware to require HTTPS connections.
requires https://github.com/falconry/falcon-require-https to be installed via
pip install falcon-require-https
"""
import hug
from falcon_require_https import RequireHTTPS
hug.API(__name__).http.add_middleware(RequireHTTPS())
@hug.get()
def my_endpoint():
return 'Success!'
|
<commit_before><commit_msg>Add example of force https<commit_after>"""An example of using a middleware to require HTTPS connections.
requires https://github.com/falconry/falcon-require-https to be installed via
pip install falcon-require-https
"""
import hug
from falcon_require_https import RequireHTTPS
hug.API(__name__).http.add_middleware(RequireHTTPS())
@hug.get()
def my_endpoint():
return 'Success!'
|
|
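As a hedged usage sketch (not in the commit), the endpoint can be exercised with hug's built-in test client; whether RequireHTTPS rejects the simulated request depends on how falcon-require-https inspects the scheme, so the output here is only indicative.

import hug
import force_https  # the example module above

response = hug.test.get(force_https, 'my_endpoint')  # simulate a GET request
print(response.status, response.data)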
876365a7f19a3786db15dc7debbd2686fa5d02ef
|
wmata.py
|
wmata.py
|
import datetime
import urllib
import json
class WmataError(Exception):
pass
class Wmata(object):
base_url = 'http://api.wmata.com/%(svc)s.svc/json/%(endpoint)s'
# By default, we'll use the WMATA demonstration key
api_key = 'kfgpmgvfgacx98de9q3xazww'
def __init__(self, api_key=None):
if api_key is not None:
self.api_key = api_key
|
Add WmataError class and start of Wmata class.
|
Add WmataError class and start of Wmata class.
Starts module code. Error class is added, and the main class, Wmata, is added,
with the default class variables and the init function added.
|
Python
|
mit
|
ExperimentMonty/py3-wmata
|
Add WmataError class and start of Wmata class.
Starts module code. Error class is added, and the main class, Wmata, is added,
with the default class variables and the init function added.
|
import datetime
import urllib
import json
class WmataError(Exception):
pass
class Wmata(object):
base_url = 'http://api.wmata.com/%(svc)s.svc/json/%(endpoint)s'
# By default, we'll use the WMATA demonstration key
api_key = 'kfgpmgvfgacx98de9q3xazww'
def __init__(self, api_key=None):
if api_key is not None:
self.api_key = api_key
|
<commit_before><commit_msg>Add WmataError class and start of Wmata class.
Starts module code. Error class is added, and the main class, Wmata, is added,
with the default class variables and the init function added.<commit_after>
|
import datetime
import urllib
import json
class WmataError(Exception):
pass
class Wmata(object):
base_url = 'http://api.wmata.com/%(svc)s.svc/json/%(endpoint)s'
# By default, we'll use the WMATA demonstration key
api_key = 'kfgpmgvfgacx98de9q3xazww'
def __init__(self, api_key=None):
if api_key is not None:
self.api_key = api_key
|
Add WmataError class and start of Wmata class.
Starts module code. Error class is added, and the main class, Wmata, is added,
with the default class variables and the init function added.import datetime
import urllib
import json
class WmataError(Exception):
pass
class Wmata(object):
base_url = 'http://api.wmata.com/%(svc)s.svc/json/%(endpoint)s'
# By default, we'll use the WMATA demonstration key
api_key = 'kfgpmgvfgacx98de9q3xazww'
def __init__(self, api_key=None):
if api_key is not None:
self.api_key = api_key
|
<commit_before><commit_msg>Add WmataError class and start of Wmata class.
Starts module code. Error class is added, and the main class, Wmata, is added,
with the default class variables and the init function added.<commit_after>import datetime
import urllib
import json
class WmataError(Exception):
pass
class Wmata(object):
base_url = 'http://api.wmata.com/%(svc)s.svc/json/%(endpoint)s'
# By default, we'll use the WMATA demonstration key
api_key = 'kfgpmgvfgacx98de9q3xazww'
def __init__(self, api_key=None):
if api_key is not None:
self.api_key = api_key
|
|
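A hypothetical continuation (not in the commit) of how the base_url template might be filled in by later request methods of the Wmata class; the service and endpoint names here are invented.

base_url = 'http://api.wmata.com/%(svc)s.svc/json/%(endpoint)s'
url = base_url % {'svc': 'Rail', 'endpoint': 'jLines'}  # fill the template
print(url)  # http://api.wmata.com/Rail.svc/json/jLines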
a4ab01d64c505b786e6fef217829fb56c3d6b6ce
|
mzalendo/scorecards/management/commands/scorecard_update_person_hansard_appearances.py
|
mzalendo/scorecards/management/commands/scorecard_update_person_hansard_appearances.py
|
import datetime
from django.core.management.base import NoArgsCommand
from django.core.exceptions import ImproperlyConfigured
class Command(NoArgsCommand):
help = 'Create/update hansard scorecard entry for all mps'
args = ''
def handle_noargs(self, **options):
# Imports are here to avoid an import loop created when the Hansard
# search indexes are checked
from core.models import Person
from scorecards.models import Category, Entry
# create the category
try:
category = Category.objects.get(slug="hansard-appearances")
except Category.DoesNotExist:
raise ImproperlyConfigured("Please create a scorecard category with the slug 'hansard-appearances'")
# Find all the people we should score for
people = Person.objects.all().is_mp()
lower_limit = datetime.date.today() - datetime.timedelta(183)
for person in people:
# NOTE: We could certainly do all this in a single query.
hansard_count = person.hansard_entries.filter(sitting__start_date__gte=lower_limit).count()
try:
entry = person.scorecard_entries.get(category=category)
except Entry.DoesNotExist:
entry = Entry(content_object=person, category=category)
if hansard_count < 6:
entry.score = -1
entry.remark = "Hardly ever speaks in parliament"
elif hansard_count < 60:
entry.score = 0
entry.remark = "Sometimes speaks in parliament"
else:
entry.score = 1
entry.remark = "Frequently speaks in parliament"
entry.date = datetime.date.today()
entry.save()
|
Add management script to generate hansard appearance scores.
|
Add management script to generate hansard appearance scores.
|
Python
|
agpl-3.0
|
geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,Hutspace/odekro,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,patricmutwiri/pombola,hzj123/56th,hzj123/56th,Hutspace/odekro,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,Hutspace/odekro,hzj123/56th,patricmutwiri/pombola,Hutspace/odekro,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,mysociety/pombola,Hutspace/odekro,patricmutwiri/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,ken-muturi/pombola
|
Add management script to generate hansard appearance scores.
|
import datetime
from django.core.management.base import NoArgsCommand
from django.core.exceptions import ImproperlyConfigured
class Command(NoArgsCommand):
help = 'Create/update hansard scorecard entry for all mps'
args = ''
def handle_noargs(self, **options):
# Imports are here to avoid an import loop created when the Hansard
# search indexes are checked
from core.models import Person
from scorecards.models import Category, Entry
# create the category
try:
category = Category.objects.get(slug="hansard-appearances")
except Category.DoesNotExist:
raise ImproperlyConfigured("Please create a scorecard category with the slug 'hansard-appearances'")
# Find all the people we should score for
people = Person.objects.all().is_mp()
lower_limit = datetime.date.today() - datetime.timedelta(183)
for person in people:
# NOTE: We could certainly do all this in a single query.
hansard_count = person.hansard_entries.filter(sitting__start_date__gte=lower_limit).count()
try:
entry = person.scorecard_entries.get(category=category)
except Entry.DoesNotExist:
entry = Entry(content_object=person, category=category)
if hansard_count < 6:
entry.score = -1
entry.remark = "Hardly ever speaks in parliament"
elif hansard_count < 60:
entry.score = 0
entry.remark = "Sometimes speaks in parliament"
else:
entry.score = 1
entry.remark = "Frequently speaks in parliament"
entry.date = datetime.date.today()
entry.save()
|
<commit_before><commit_msg>Add management script to generate hansard appearance scores.<commit_after>
|
import datetime
from django.core.management.base import NoArgsCommand
from django.core.exceptions import ImproperlyConfigured
class Command(NoArgsCommand):
help = 'Create/update hansard scorecard entry for all mps'
args = ''
def handle_noargs(self, **options):
# Imports are here to avoid an import loop created when the Hansard
# search indexes are checked
from core.models import Person
from scorecards.models import Category, Entry
# create the category
try:
category = Category.objects.get(slug="hansard-appearances")
except Category.DoesNotExist:
raise ImproperlyConfigured("Please create a scorecard category with the slug 'hansard-appearances'")
# Find all the people we should score for
people = Person.objects.all().is_mp()
lower_limit = datetime.date.today() - datetime.timedelta(183)
for person in people:
# NOTE: We could certainly do all this in a single query.
hansard_count = person.hansard_entries.filter(sitting__start_date__gte=lower_limit).count()
try:
entry = person.scorecard_entries.get(category=category)
except Entry.DoesNotExist:
entry = Entry(content_object=person, category=category)
if hansard_count < 6:
entry.score = -1
entry.remark = "Hardly ever speaks in parliament"
elif hansard_count < 60:
entry.score = 0
entry.remark = "Sometimes speaks in parliament"
else:
entry.score = 1
entry.remark = "Frequently speaks in parliament"
entry.date = datetime.date.today()
entry.save()
|
Add management script to generate hansard appearance scores.import datetime
from django.core.management.base import NoArgsCommand
from django.core.exceptions import ImproperlyConfigured
class Command(NoArgsCommand):
help = 'Create/update hansard scorecard entry for all mps'
args = ''
def handle_noargs(self, **options):
# Imports are here to avoid an import loop created when the Hansard
# search indexes are checked
from core.models import Person
from scorecards.models import Category, Entry
# create the category
try:
category = Category.objects.get(slug="hansard-appearances")
except Category.DoesNotExist:
raise ImproperlyConfigured("Please create a scorecard category with the slug 'hansard-appearances'")
# Find all the people we should score for
people = Person.objects.all().is_mp()
lower_limit = datetime.date.today() - datetime.timedelta(183)
for person in people:
# NOTE: We could certainly do all this in a single query.
hansard_count = person.hansard_entries.filter(sitting__start_date__gte=lower_limit).count()
try:
entry = person.scorecard_entries.get(category=category)
except Entry.DoesNotExist:
entry = Entry(content_object=person, category=category)
if hansard_count < 6:
entry.score = -1
entry.remark = "Hardly ever speaks in parliament"
elif hansard_count < 60:
entry.score = 0
entry.remark = "Sometimes speaks in parliament"
else:
entry.score = 1
entry.remark = "Frequently speaks in parliament"
entry.date = datetime.date.today()
entry.save()
|
<commit_before><commit_msg>Add management script to generate hansard appearance scores.<commit_after>import datetime
from django.core.management.base import NoArgsCommand
from django.core.exceptions import ImproperlyConfigured
class Command(NoArgsCommand):
help = 'Create/update hansard scorecard entry for all mps'
args = ''
def handle_noargs(self, **options):
# Imports are here to avoid an import loop created when the Hansard
# search indexes are checked
from core.models import Person
from scorecards.models import Category, Entry
# create the category
try:
category = Category.objects.get(slug="hansard-appearances")
except Category.DoesNotExist:
raise ImproperlyConfigured("Please create a scorecard category with the slug 'hansard-appearances'")
# Find all the people we should score for
people = Person.objects.all().is_mp()
lower_limit = datetime.date.today() - datetime.timedelta(183)
for person in people:
# NOTE: We could certainly do all this in a single query.
hansard_count = person.hansard_entries.filter(sitting__start_date__gte=lower_limit).count()
try:
entry = person.scorecard_entries.get(category=category)
except Entry.DoesNotExist:
entry = Entry(content_object=person, category=category)
if hansard_count < 6:
entry.score = -1
entry.remark = "Hardly ever speaks in parliament"
elif hansard_count < 60:
entry.score = 0
entry.remark = "Sometimes speaks in parliament"
else:
entry.score = 1
entry.remark = "Frequently speaks in parliament"
entry.date = datetime.date.today()
entry.save()
|
|
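For clarity, the thresholds applied by the command above can be restated as a small pure function; this restatement is illustrative only and not part of the commit.

def hansard_score(count):
    # mirrors the score/remark mapping used in handle_noargs
    if count < 6:
        return -1, "Hardly ever speaks in parliament"
    elif count < 60:
        return 0, "Sometimes speaks in parliament"
    return 1, "Frequently speaks in parliament"

print(hansard_score(3), hansard_score(30), hansard_score(300))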
4a404709081515fa0cc91683b5a9ad8f6a68eae6
|
migrations/versions/630_remove_mandatory_assessment_methods_.py
|
migrations/versions/630_remove_mandatory_assessment_methods_.py
|
"""Remove mandatory assessment methods from briefs
Revision ID: 630
Revises: 620
Create Date: 2016-06-03 15:26:53.890401
"""
# revision identifiers, used by Alembic.
revision = '630'
down_revision = '620'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import postgresql
briefs = table(
'briefs',
column('id', sa.Integer),
column('lot_id', sa.Integer),
column('data', postgresql.JSON),
)
def upgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
if brief.data.get('evaluationType') is None:
continue
optional_methods = list([
method for method in brief.data['evaluationType']
if method not in ['Work history', 'Written proposal']
])
if brief.data['evaluationType'] != optional_methods:
if optional_methods:
brief.data['evaluationType'] = optional_methods
else:
brief.data.pop('evaluationType')
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
def downgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
# Add written proposal to all outcomes and research participants briefs
if brief.lot_id in [5, 8]:
brief.data['evaluationType'] = ['Written proposal'] + brief.data.get('evaluationType', [])
# Add work history to all specialists briefs
elif brief.lot_id == 6:
brief.data['evaluationType'] = ['Work history'] + brief.data.get('evaluationType', [])
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
|
Add a migration to drop mandatory assessment methods from brief data
|
Add a migration to drop mandatory assessment methods from brief data
Removes work history and written proposal from all briefs, since
they're no longer a valid option for evaluationType and are added
automatically to all briefs.
Downgrade will add the options to all briefs based on lot. Since we
don't know if the brief had the option selected before the upgrade
migration we add them to ALL briefs, including draft ones. This means
we might end up adding it to briefs that didn't have it before, but
that seems better than silently removing the option, as all briefs
should have them selected in the end.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add a migration to drop mandatory assessment methods from brief data
Removes work history and written proposal from all briefs, since
they're no longer a valid option for evaluationType and are added
automatically to all briefs.
Downgrade will add the options to all briefs based on lot. Since we
don't know if the brief had the option selected before the upgrade
migration we add them to ALL briefs, including draft ones. This means
we might end up adding it to briefs that didn't have it before, but
that seems better than silently removing the option, as all briefs
should have them selected in the end.
|
"""Remove mandatory assessment methods from briefs
Revision ID: 630
Revises: 620
Create Date: 2016-06-03 15:26:53.890401
"""
# revision identifiers, used by Alembic.
revision = '630'
down_revision = '620'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import postgresql
briefs = table(
'briefs',
column('id', sa.Integer),
column('lot_id', sa.Integer),
column('data', postgresql.JSON),
)
def upgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
if brief.data.get('evaluationType') is None:
continue
optional_methods = list([
method for method in brief.data['evaluationType']
if method not in ['Work history', 'Written proposal']
])
if brief.data['evaluationType'] != optional_methods:
if optional_methods:
brief.data['evaluationType'] = optional_methods
else:
brief.data.pop('evaluationType')
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
def downgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
# Add written proposal to all outcomes and research participants briefs
if brief.lot_id in [5, 8]:
brief.data['evaluationType'] = ['Written proposal'] + brief.data.get('evaluationType', [])
# Add work history to all specialists briefs
elif brief.lot_id == 6:
brief.data['evaluationType'] = ['Work history'] + brief.data.get('evaluationType', [])
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
|
<commit_before><commit_msg>Add a migration to drop mandatory assessment methods from brief data
Removes work history and written proposal from all briefs, since
they're no longer a valid option for evaluationType and are added
automatically to all briefs.
Downgrade will add the options to all briefs based on lot. Since we
don't know if the brief had the option selected before the upgrade
migration we add them to ALL briefs, including draft ones. This means
we might end up adding it to briefs that didn't have it before, but
that seems better than silently removing the option, as all briefs
should have them selected in the end.<commit_after>
|
"""Remove mandatory assessment methods from briefs
Revision ID: 630
Revises: 620
Create Date: 2016-06-03 15:26:53.890401
"""
# revision identifiers, used by Alembic.
revision = '630'
down_revision = '620'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import postgresql
briefs = table(
'briefs',
column('id', sa.Integer),
column('lot_id', sa.Integer),
column('data', postgresql.JSON),
)
def upgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
if brief.data.get('evaluationType') is None:
continue
optional_methods = list([
method for method in brief.data['evaluationType']
if method not in ['Work history', 'Written proposal']
])
if brief.data['evaluationType'] != optional_methods:
if optional_methods:
brief.data['evaluationType'] = optional_methods
else:
brief.data.pop('evaluationType')
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
def downgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
# Add written proposal to all outcomes and research participants briefs
if brief.lot_id in [5, 8]:
brief.data['evaluationType'] = ['Written proposal'] + brief.data.get('evaluationType', [])
# Add work history to all specialists briefs
elif brief.lot_id == 6:
brief.data['evaluationType'] = ['Work history'] + brief.data.get('evaluationType', [])
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
|
Add a migration to drop mandatory assessment methods from brief data
Removes work history and written proposal from all briefs, since
they're no longer a valid option for evaluationType and are added
automatically to all briefs.
Downgrade will add the options to all briefs based on lot. Since we
don't know if the brief had the option selected before the upgrade
migration we add them to ALL briefs, including draft ones. This means
we might end up adding it to briefs that didn't have it before, but
that seems better than silently removing the option, as all briefs
should have them selected in the end."""Remove mandatory assessment methods from briefs
Revision ID: 630
Revises: 620
Create Date: 2016-06-03 15:26:53.890401
"""
# revision identifiers, used by Alembic.
revision = '630'
down_revision = '620'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import postgresql
briefs = table(
'briefs',
column('id', sa.Integer),
column('lot_id', sa.Integer),
column('data', postgresql.JSON),
)
def upgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
if brief.data.get('evaluationType') is None:
continue
optional_methods = list([
method for method in brief.data['evaluationType']
if method not in ['Work history', 'Written proposal']
])
if brief.data['evaluationType'] != optional_methods:
if optional_methods:
brief.data['evaluationType'] = optional_methods
else:
brief.data.pop('evaluationType')
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
def downgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
# Add written proposal to all outcomes and research participants briefs
if brief.lot_id in [5, 8]:
brief.data['evaluationType'] = ['Written proposal'] + brief.data.get('evaluationType', [])
# Add work history to all specialists briefs
elif brief.lot_id == 6:
brief.data['evaluationType'] = ['Work history'] + brief.data.get('evaluationType', [])
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
|
<commit_before><commit_msg>Add a migration to drop mandatory assessment methods from brief data
Removes work history and written proposal from all briefs, since
they're no longer a valid option for evaluationType and are added
automatically to all briefs.
Downgrade will add the options to all briefs based on lot. Since we
don't know if the brief had the option selected before the upgrade
migration we add them to ALL briefs, including draft ones. This means
we might end up adding it to briefs that didn't have it before, but
that seems better than silently removing the option, as all briefs
should have them selected in the end.<commit_after>"""Remove mandatory assessment methods from briefs
Revision ID: 630
Revises: 620
Create Date: 2016-06-03 15:26:53.890401
"""
# revision identifiers, used by Alembic.
revision = '630'
down_revision = '620'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import postgresql
briefs = table(
'briefs',
column('id', sa.Integer),
column('lot_id', sa.Integer),
column('data', postgresql.JSON),
)
def upgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
if brief.data.get('evaluationType') is None:
continue
optional_methods = list([
method for method in brief.data['evaluationType']
if method not in ['Work history', 'Written proposal']
])
if brief.data['evaluationType'] != optional_methods:
if optional_methods:
brief.data['evaluationType'] = optional_methods
else:
brief.data.pop('evaluationType')
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
def downgrade():
conn = op.get_bind()
for brief in conn.execute(briefs.select()):
# Add written proposal to all outcomes and research participants briefs
if brief.lot_id in [5, 8]:
brief.data['evaluationType'] = ['Written proposal'] + brief.data.get('evaluationType', [])
# Add work history to all specialists briefs
elif brief.lot_id == 6:
brief.data['evaluationType'] = ['Work history'] + brief.data.get('evaluationType', [])
conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
data=brief.data
))
|
|
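A minimal sketch (with made-up brief data) of the filtering the upgrade step performs on each brief's evaluationType list:

data = {'evaluationType': ['Work history', 'Case study']}
optional = [m for m in data['evaluationType']
            if m not in ('Work history', 'Written proposal')]
if optional:
    data['evaluationType'] = optional  # keep only the optional methods
else:
    data.pop('evaluationType')         # drop the key if nothing remains
print(data)  # {'evaluationType': ['Case study']}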
6fdf7cc68e05ce6e8e18306eca7d8e36d1a166ea
|
hotline/db/db_client.py
|
hotline/db/db_client.py
|
import importlib
import os
class DBClient:
db_defaults = {'mongo': 'mongodb://localhost:27017/',
'redis': 'redis://localhost:6379',
'postgresql': 'postgresql://localhost:5432'
}
def __init__(self, url=None, db_type=None, db_name=None):
self.db_type = db_type
self.url = url or DBClient.db_defaults[db_type]
db_module = importlib.import_module('db.db_{0}'.format(db_type))
self.client = getattr(db_module, '{0}Client'.format(db_type.capitalize()))(self.url)
def connect(self):
pass
# Update later to remove default db_type 'mongo'
db_client = DBClient(db_type='mongo')
db_client.connect()
|
Add Client class to abstract from different database clients
|
Add Client class to abstract from different database clients
|
Python
|
mit
|
wearhacks/hackathon_hotline
|
Add Client class to abstract from different database clients
|
import importlib
import os
class DBClient:
db_defaults = {'mongo': 'mongodb://localhost:27017/',
'redis': 'redis://localhost:6379',
'postgresql': 'postgresql://localhost:5432'
}
def __init__(self, url=None, db_type=None, db_name=None):
self.db_type = db_type
self.url = url or DBClient.db_defaults[db_type]
db_module = importlib.import_module('db.db_{0}'.format(db_type))
self.client = getattr(db_module, '{0}Client'.format(db_type.capitalize()))(self.url)
def connect(self):
pass
# Update later to remove default db_type 'mongo'
db_client = DBClient(db_type='mongo')
db_client.connect()
|
<commit_before><commit_msg>Add Client class to abstract from different database clients<commit_after>
|
import importlib
import os
class DBClient:
db_defaults = {'mongo': 'mongodb://localhost:27017/',
'redis': 'redis://localhost:6379',
'postgresql': 'postgresql://localhost:5432'
}
def __init__(self, url=None, db_type=None, db_name=None):
self.db_type = db_type
self.url = url or DBClient.db_defaults[db_type]
db_module = importlib.import_module('db.db_{0}'.format(db_type))
self.client = getattr(db_module, '{0}Client'.format(db_type.capitalize()))(self.url)
def connect(self):
pass
# Update later to remove default db_type 'mongo'
db_client = DBClient(db_type='mongo')
db_client.connect()
|
Add Client class to abstract from different database clientsimport importlib
import os
class DBClient:
db_defaults = {'mongo': 'mongodb://localhost:27017/',
'redis': 'redis://localhost:6379',
'postgresql': 'postgresql://localhost:5432'
}
def __init__(self, url=None, db_type=None, db_name=None):
self.db_type = db_type
self.url = url or DBClient.db_defaults[db_type]
db_module = importlib.import_module('db.db_{0}'.format(db_type))
self.client = getattr(db_module, '{0}Client'.format(db_type.capitalize()))(self.url)
def connect(self):
pass
# Update later to remove default db_type 'mongo'
db_client = DBClient(db_type='mongo')
db_client.connect()
|
<commit_before><commit_msg>Add Client class to abstract from different database clients<commit_after>import importlib
import os
class DBClient:
db_defaults = {'mongo': 'mongodb://localhost:27017/',
'redis': 'redis://localhost:6379',
'postgresql': 'postgresql://localhost:5432'
}
def __init__(self, url=None, db_type=None, db_name=None):
self.db_type = db_type
self.url = url or DBClient.db_defaults[db_type]
db_module = importlib.import_module('db.db_{0}'.format(db_type))
self.client = getattr(db_module, '{0}Client'.format(db_type.capitalize()))(self.url)
def connect(self):
pass
# Update later to remove default db_type 'mongo'
db_client = DBClient(db_type='mongo')
db_client.connect()
|
|
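An illustrative sketch (not from the commit) of the default-URL fallback the constructor relies on, with the defaults copied from the class above:

db_defaults = {'mongo': 'mongodb://localhost:27017/',
               'redis': 'redis://localhost:6379',
               'postgresql': 'postgresql://localhost:5432'}
url = None           # no explicit URL passed in
db_type = 'redis'
print(url or db_defaults[db_type])  # falls back to redis://localhost:6379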
ecbc691307c43ad06d7f539f008fccbff690d538
|
unit_tests/test_precomputed_io.py
|
unit_tests/test_precomputed_io.py
|
# Copyright (c) 2018 CEA
# Author: Yann Leprince <yann.leprince@cea.fr>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import numpy as np
import pytest
from neuroglancer_scripts.accessor import get_accessor_for_url
from neuroglancer_scripts.chunk_encoding import InvalidInfoError
from neuroglancer_scripts.precomputed_io import (
get_IO_for_existing_dataset,
get_IO_for_new_dataset,
)
DUMMY_INFO = {
"type": "image",
"data_type": "uint16",
"num_channels": 1,
"scales": [
{
"key": "key",
"size": [8, 3, 15],
"resolution": [1e6, 1e6, 1e6],
"voxel_offset": [0, 0, 0],
"chunk_sizes": [[8, 8, 8]],
"encoding": "raw",
}
]
}
def test_precomputed_IO_chunk_roundtrip(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
dummy_chunk = np.arange(8 * 3 * 7, dtype="uint16").reshape(1, 7, 3, 8)
chunk_coords = (0, 8, 0, 3, 8, 15)
io.write_chunk(dummy_chunk, "key", chunk_coords)
assert np.array_equal(io.read_chunk("key", chunk_coords), dummy_chunk)
io2 = get_IO_for_existing_dataset(accessor)
assert io2.info == DUMMY_INFO
assert np.array_equal(io2.read_chunk("key", chunk_coords), dummy_chunk)
def test_precomputed_IO_info_error(tmpdir):
with (tmpdir / "info").open("w") as f:
f.write("invalid JSON")
accessor = get_accessor_for_url(str(tmpdir))
with pytest.raises(InvalidInfoError):
get_IO_for_existing_dataset(accessor)
def test_precomputed_IO_validate_chunk_coords(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
good_chunk_coords = (0, 8, 0, 3, 0, 8)
bad_chunk_coords = (0, 8, 1, 4, 0, 8)
assert io.validate_chunk_coords("key", good_chunk_coords) is True
assert io.validate_chunk_coords("key", bad_chunk_coords) is False
|
Add unit tests for the precomputed_io module
|
Add unit tests for the precomputed_io module
|
Python
|
mit
|
HumanBrainProject/neuroglancer-scripts
|
Add unit tests for the precomputed_io module
|
# Copyright (c) 2018 CEA
# Author: Yann Leprince <yann.leprince@cea.fr>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import numpy as np
import pytest
from neuroglancer_scripts.accessor import get_accessor_for_url
from neuroglancer_scripts.chunk_encoding import InvalidInfoError
from neuroglancer_scripts.precomputed_io import (
get_IO_for_existing_dataset,
get_IO_for_new_dataset,
)
DUMMY_INFO = {
"type": "image",
"data_type": "uint16",
"num_channels": 1,
"scales": [
{
"key": "key",
"size": [8, 3, 15],
"resolution": [1e6, 1e6, 1e6],
"voxel_offset": [0, 0, 0],
"chunk_sizes": [[8, 8, 8]],
"encoding": "raw",
}
]
}
def test_precomputed_IO_chunk_roundtrip(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
dummy_chunk = np.arange(8 * 3 * 7, dtype="uint16").reshape(1, 7, 3, 8)
chunk_coords = (0, 8, 0, 3, 8, 15)
io.write_chunk(dummy_chunk, "key", chunk_coords)
assert np.array_equal(io.read_chunk("key", chunk_coords), dummy_chunk)
io2 = get_IO_for_existing_dataset(accessor)
assert io2.info == DUMMY_INFO
assert np.array_equal(io2.read_chunk("key", chunk_coords), dummy_chunk)
def test_precomputed_IO_info_error(tmpdir):
with (tmpdir / "info").open("w") as f:
f.write("invalid JSON")
accessor = get_accessor_for_url(str(tmpdir))
with pytest.raises(InvalidInfoError):
get_IO_for_existing_dataset(accessor)
def test_precomputed_IO_validate_chunk_coords(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
good_chunk_coords = (0, 8, 0, 3, 0, 8)
bad_chunk_coords = (0, 8, 1, 4, 0, 8)
assert io.validate_chunk_coords("key", good_chunk_coords) is True
assert io.validate_chunk_coords("key", bad_chunk_coords) is False
|
<commit_before><commit_msg>Add unit tests for the precomputed_io module<commit_after>
|
# Copyright (c) 2018 CEA
# Author: Yann Leprince <yann.leprince@cea.fr>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import numpy as np
import pytest
from neuroglancer_scripts.accessor import get_accessor_for_url
from neuroglancer_scripts.chunk_encoding import InvalidInfoError
from neuroglancer_scripts.precomputed_io import (
get_IO_for_existing_dataset,
get_IO_for_new_dataset,
)
DUMMY_INFO = {
"type": "image",
"data_type": "uint16",
"num_channels": 1,
"scales": [
{
"key": "key",
"size": [8, 3, 15],
"resolution": [1e6, 1e6, 1e6],
"voxel_offset": [0, 0, 0],
"chunk_sizes": [[8, 8, 8]],
"encoding": "raw",
}
]
}
def test_precomputed_IO_chunk_roundtrip(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
dummy_chunk = np.arange(8 * 3 * 7, dtype="uint16").reshape(1, 7, 3, 8)
chunk_coords = (0, 8, 0, 3, 8, 15)
io.write_chunk(dummy_chunk, "key", chunk_coords)
assert np.array_equal(io.read_chunk("key", chunk_coords), dummy_chunk)
io2 = get_IO_for_existing_dataset(accessor)
assert io2.info == DUMMY_INFO
assert np.array_equal(io2.read_chunk("key", chunk_coords), dummy_chunk)
def test_precomputed_IO_info_error(tmpdir):
with (tmpdir / "info").open("w") as f:
f.write("invalid JSON")
accessor = get_accessor_for_url(str(tmpdir))
with pytest.raises(InvalidInfoError):
get_IO_for_existing_dataset(accessor)
def test_precomputed_IO_validate_chunk_coords(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
good_chunk_coords = (0, 8, 0, 3, 0, 8)
bad_chunk_coords = (0, 8, 1, 4, 0, 8)
assert io.validate_chunk_coords("key", good_chunk_coords) is True
assert io.validate_chunk_coords("key", bad_chunk_coords) is False
|
Add unit tests for the precomputed_io module# Copyright (c) 2018 CEA
# Author: Yann Leprince <yann.leprince@cea.fr>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import numpy as np
import pytest
from neuroglancer_scripts.accessor import get_accessor_for_url
from neuroglancer_scripts.chunk_encoding import InvalidInfoError
from neuroglancer_scripts.precomputed_io import (
get_IO_for_existing_dataset,
get_IO_for_new_dataset,
)
DUMMY_INFO = {
"type": "image",
"data_type": "uint16",
"num_channels": 1,
"scales": [
{
"key": "key",
"size": [8, 3, 15],
"resolution": [1e6, 1e6, 1e6],
"voxel_offset": [0, 0, 0],
"chunk_sizes": [[8, 8, 8]],
"encoding": "raw",
}
]
}
def test_precomputed_IO_chunk_roundtrip(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
dummy_chunk = np.arange(8 * 3 * 7, dtype="uint16").reshape(1, 7, 3, 8)
chunk_coords = (0, 8, 0, 3, 8, 15)
io.write_chunk(dummy_chunk, "key", chunk_coords)
assert np.array_equal(io.read_chunk("key", chunk_coords), dummy_chunk)
io2 = get_IO_for_existing_dataset(accessor)
assert io2.info == DUMMY_INFO
assert np.array_equal(io2.read_chunk("key", chunk_coords), dummy_chunk)
def test_precomputed_IO_info_error(tmpdir):
with (tmpdir / "info").open("w") as f:
f.write("invalid JSON")
accessor = get_accessor_for_url(str(tmpdir))
with pytest.raises(InvalidInfoError):
get_IO_for_existing_dataset(accessor)
def test_precomputed_IO_validate_chunk_coords(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
good_chunk_coords = (0, 8, 0, 3, 0, 8)
bad_chunk_coords = (0, 8, 1, 4, 0, 8)
assert io.validate_chunk_coords("key", good_chunk_coords) is True
assert io.validate_chunk_coords("key", bad_chunk_coords) is False
|
<commit_before><commit_msg>Add unit tests for the precomputed_io module<commit_after># Copyright (c) 2018 CEA
# Author: Yann Leprince <yann.leprince@cea.fr>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import numpy as np
import pytest
from neuroglancer_scripts.accessor import get_accessor_for_url
from neuroglancer_scripts.chunk_encoding import InvalidInfoError
from neuroglancer_scripts.precomputed_io import (
get_IO_for_existing_dataset,
get_IO_for_new_dataset,
)
DUMMY_INFO = {
"type": "image",
"data_type": "uint16",
"num_channels": 1,
"scales": [
{
"key": "key",
"size": [8, 3, 15],
"resolution": [1e6, 1e6, 1e6],
"voxel_offset": [0, 0, 0],
"chunk_sizes": [[8, 8, 8]],
"encoding": "raw",
}
]
}
def test_precomputed_IO_chunk_roundtrip(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
dummy_chunk = np.arange(8 * 3 * 7, dtype="uint16").reshape(1, 7, 3, 8)
chunk_coords = (0, 8, 0, 3, 8, 15)
io.write_chunk(dummy_chunk, "key", chunk_coords)
assert np.array_equal(io.read_chunk("key", chunk_coords), dummy_chunk)
io2 = get_IO_for_existing_dataset(accessor)
assert io2.info == DUMMY_INFO
assert np.array_equal(io2.read_chunk("key", chunk_coords), dummy_chunk)
def test_precomputed_IO_info_error(tmpdir):
with (tmpdir / "info").open("w") as f:
f.write("invalid JSON")
accessor = get_accessor_for_url(str(tmpdir))
with pytest.raises(InvalidInfoError):
get_IO_for_existing_dataset(accessor)
def test_precomputed_IO_validate_chunk_coords(tmpdir):
accessor = get_accessor_for_url(str(tmpdir))
# Minimal info file
io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
good_chunk_coords = (0, 8, 0, 3, 0, 8)
bad_chunk_coords = (0, 8, 1, 4, 0, 8)
assert io.validate_chunk_coords("key", good_chunk_coords) is True
assert io.validate_chunk_coords("key", bad_chunk_coords) is False
|
|
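As a hedged usage note (not part of the commit), the new tests can be run on their own; invoking pytest programmatically is equivalent to the usual CLI call, assuming pytest is installed and the path matches the repository layout.

import pytest
pytest.main(["unit_tests/test_precomputed_io.py", "-q"])  # run just this module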
885ed1e8e3256352d2fde771bef57997809c3c1e
|
migrations/versions/0209_remove_monthly_billing_.py
|
migrations/versions/0209_remove_monthly_billing_.py
|
"""
Revision ID: 0209_remove_monthly_billing
Revises: 84c3b6eb16b3
Create Date: 2018-07-27 14:46:30.109811
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0209_remove_monthly_billing'
down_revision = '84c3b6eb16b3'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_monthly_billing_service_id', table_name='monthly_billing')
op.drop_table('monthly_billing')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('monthly_billing',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('notification_type', postgresql.ENUM('email', 'sms', 'letter', name='notification_type'), autoincrement=False, nullable=False),
sa.Column('monthly_totals', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=False),
sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('start_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('end_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], name='monthly_billing_service_id_fkey'),
sa.PrimaryKeyConstraint('id', name='monthly_billing_pkey'),
sa.UniqueConstraint('service_id', 'start_date', 'notification_type', name='uix_monthly_billing')
)
op.create_index('ix_monthly_billing_service_id', 'monthly_billing', ['service_id'], unique=False)
# ### end Alembic commands ###
|
Remove monthly_billing table from the database
|
Remove monthly_billing table from the database
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Remove monthly_billing table from the database
|
"""
Revision ID: 0209_remove_monthly_billing
Revises: 84c3b6eb16b3
Create Date: 2018-07-27 14:46:30.109811
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0209_remove_monthly_billing'
down_revision = '84c3b6eb16b3'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_monthly_billing_service_id', table_name='monthly_billing')
op.drop_table('monthly_billing')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('monthly_billing',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('notification_type', postgresql.ENUM('email', 'sms', 'letter', name='notification_type'), autoincrement=False, nullable=False),
sa.Column('monthly_totals', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=False),
sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('start_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('end_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], name='monthly_billing_service_id_fkey'),
sa.PrimaryKeyConstraint('id', name='monthly_billing_pkey'),
sa.UniqueConstraint('service_id', 'start_date', 'notification_type', name='uix_monthly_billing')
)
op.create_index('ix_monthly_billing_service_id', 'monthly_billing', ['service_id'], unique=False)
# ### end Alembic commands ###
|
<commit_before><commit_msg>Remove monthly_billing table from the database<commit_after>
|
"""
Revision ID: 0209_remove_monthly_billing
Revises: 84c3b6eb16b3
Create Date: 2018-07-27 14:46:30.109811
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0209_remove_monthly_billing'
down_revision = '84c3b6eb16b3'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_monthly_billing_service_id', table_name='monthly_billing')
op.drop_table('monthly_billing')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('monthly_billing',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('notification_type', postgresql.ENUM('email', 'sms', 'letter', name='notification_type'), autoincrement=False, nullable=False),
sa.Column('monthly_totals', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=False),
sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('start_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('end_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], name='monthly_billing_service_id_fkey'),
sa.PrimaryKeyConstraint('id', name='monthly_billing_pkey'),
sa.UniqueConstraint('service_id', 'start_date', 'notification_type', name='uix_monthly_billing')
)
op.create_index('ix_monthly_billing_service_id', 'monthly_billing', ['service_id'], unique=False)
# ### end Alembic commands ###
|
Remove monthly_billing table from the database"""
Revision ID: 0209_remove_monthly_billing
Revises: 84c3b6eb16b3
Create Date: 2018-07-27 14:46:30.109811
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0209_remove_monthly_billing'
down_revision = '84c3b6eb16b3'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_monthly_billing_service_id', table_name='monthly_billing')
op.drop_table('monthly_billing')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('monthly_billing',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('notification_type', postgresql.ENUM('email', 'sms', 'letter', name='notification_type'), autoincrement=False, nullable=False),
sa.Column('monthly_totals', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=False),
sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('start_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('end_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], name='monthly_billing_service_id_fkey'),
sa.PrimaryKeyConstraint('id', name='monthly_billing_pkey'),
sa.UniqueConstraint('service_id', 'start_date', 'notification_type', name='uix_monthly_billing')
)
op.create_index('ix_monthly_billing_service_id', 'monthly_billing', ['service_id'], unique=False)
# ### end Alembic commands ###
|
<commit_before><commit_msg>Remove monthly_billing table from the database<commit_after>"""
Revision ID: 0209_remove_monthly_billing
Revises: 84c3b6eb16b3
Create Date: 2018-07-27 14:46:30.109811
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0209_remove_monthly_billing'
down_revision = '84c3b6eb16b3'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_monthly_billing_service_id', table_name='monthly_billing')
op.drop_table('monthly_billing')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('monthly_billing',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('notification_type', postgresql.ENUM('email', 'sms', 'letter', name='notification_type'), autoincrement=False, nullable=False),
sa.Column('monthly_totals', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=False),
sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('start_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.Column('end_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], name='monthly_billing_service_id_fkey'),
sa.PrimaryKeyConstraint('id', name='monthly_billing_pkey'),
sa.UniqueConstraint('service_id', 'start_date', 'notification_type', name='uix_monthly_billing')
)
op.create_index('ix_monthly_billing_service_id', 'monthly_billing', ['service_id'], unique=False)
# ### end Alembic commands ###
|
|
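A hedged sketch (not from the commit) of applying the migration programmatically; it assumes an alembic.ini pointing at the service database, which may differ from how the project actually drives migrations.

from alembic import command
from alembic.config import Config

command.upgrade(Config("alembic.ini"), "0209_remove_monthly_billing")  # apply up to this revision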
84ae279c0044e63e00c7d21823c3159e34c73d03
|
scripts/uvfits_memtest.py
|
scripts/uvfits_memtest.py
|
#!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from memory_profiler import profile
import numpy as np
from astropy import constants as const
from astropy.io import fits
from pyuvdata import UVData
@profile
def read_uvfits():
filename = '/Volumes/Data1/mwa_uvfits/1066571272.uvfits'
# first test uvdata.read_uvfits. First read metadata then full data
uv_obj = UVData()
uv_obj.read_uvfits(filename, metadata_only=True)
uv_obj.read_uvfits_data(filename)
del(uv_obj)
# now test details with astropy
hdu_list = fits.open(filename, memmap=True)
vis_hdu = hdu_list[0]
# only read in times, then uvws, then visibilities
time0_array = vis_hdu.data.par('date')
uvw_array = (np.array(np.stack((vis_hdu.data.par('UU'),
vis_hdu.data.par('VV'),
vis_hdu.data.par('WW')))) * const.c.to('m/s').value).T
if vis_hdu.header['NAXIS'] == 7:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, :, 1])
else:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, 1])
data_array = data_array[:, np.newaxis, :, :]
# test for releasing resources
del(time0_array)
del(uvw_array)
del(data_array)
# release file handles
del(vis_hdu)
del(hdu_list)
del(filename)
return
if __name__ == '__main__':
read_uvfits()
|
Add a memory test script
|
Add a memory test script
|
Python
|
bsd-2-clause
|
HERA-Team/pyuvdata,HERA-Team/pyuvdata,HERA-Team/pyuvdata,HERA-Team/pyuvdata
|
Add a memory test script
|
#!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from memory_profiler import profile
import numpy as np
from astropy import constants as const
from astropy.io import fits
from pyuvdata import UVData
@profile
def read_uvfits():
filename = '/Volumes/Data1/mwa_uvfits/1066571272.uvfits'
# first test uvdata.read_uvfits. First read metadata then full data
uv_obj = UVData()
uv_obj.read_uvfits(filename, metadata_only=True)
uv_obj.read_uvfits_data(filename)
del(uv_obj)
# now test details with astropy
hdu_list = fits.open(filename, memmap=True)
vis_hdu = hdu_list[0]
# only read in times, then uvws, then visibilities
time0_array = vis_hdu.data.par('date')
uvw_array = (np.array(np.stack((vis_hdu.data.par('UU'),
vis_hdu.data.par('VV'),
vis_hdu.data.par('WW')))) * const.c.to('m/s').value).T
if vis_hdu.header['NAXIS'] == 7:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, :, 1])
else:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, 1])
data_array = data_array[:, np.newaxis, :, :]
# test for releasing resources
del(time0_array)
del(uvw_array)
del(data_array)
# release file handles
del(vis_hdu)
del(hdu_list)
del(filename)
return
if __name__ == '__main__':
read_uvfits()
|
<commit_before><commit_msg>Add a memory test script<commit_after>
|
#!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from memory_profiler import profile
import numpy as np
from astropy import constants as const
from astropy.io import fits
from pyuvdata import UVData
@profile
def read_uvfits():
filename = '/Volumes/Data1/mwa_uvfits/1066571272.uvfits'
# first test uvdata.read_uvfits. First read metadata then full data
uv_obj = UVData()
uv_obj.read_uvfits(filename, metadata_only=True)
uv_obj.read_uvfits_data(filename)
del(uv_obj)
# now test details with astropy
hdu_list = fits.open(filename, memmap=True)
vis_hdu = hdu_list[0]
# only read in times, then uvws, then visibilities
time0_array = vis_hdu.data.par('date')
uvw_array = (np.array(np.stack((vis_hdu.data.par('UU'),
vis_hdu.data.par('VV'),
vis_hdu.data.par('WW')))) * const.c.to('m/s').value).T
if vis_hdu.header['NAXIS'] == 7:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, :, 1])
else:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, 1])
data_array = data_array[:, np.newaxis, :, :]
# test for releasing resources
del(time0_array)
del(uvw_array)
del(data_array)
# release file handles
del(vis_hdu)
del(hdu_list)
del(filename)
return
if __name__ == '__main__':
read_uvfits()
|
Add a memory test script#!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from memory_profiler import profile
import numpy as np
from astropy import constants as const
from astropy.io import fits
from pyuvdata import UVData
@profile
def read_uvfits():
filename = '/Volumes/Data1/mwa_uvfits/1066571272.uvfits'
# first test uvdata.read_uvfits. First read metadata then full data
uv_obj = UVData()
uv_obj.read_uvfits(filename, metadata_only=True)
uv_obj.read_uvfits_data(filename)
del(uv_obj)
# now test details with astropy
hdu_list = fits.open(filename, memmap=True)
vis_hdu = hdu_list[0]
# only read in times, then uvws, then visibilities
time0_array = vis_hdu.data.par('date')
uvw_array = (np.array(np.stack((vis_hdu.data.par('UU'),
vis_hdu.data.par('VV'),
vis_hdu.data.par('WW')))) * const.c.to('m/s').value).T
if vis_hdu.header['NAXIS'] == 7:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, :, 1])
else:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, 1])
data_array = data_array[:, np.newaxis, :, :]
# test for releasing resources
del(time0_array)
del(uvw_array)
del(data_array)
# release file handles
del(vis_hdu)
del(hdu_list)
del(filename)
return
if __name__ == '__main__':
read_uvfits()
|
<commit_before><commit_msg>Add a memory test script<commit_after>#!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from memory_profiler import profile
import numpy as np
from astropy import constants as const
from astropy.io import fits
from pyuvdata import UVData
@profile
def read_uvfits():
filename = '/Volumes/Data1/mwa_uvfits/1066571272.uvfits'
# first test uvdata.read_uvfits. First read metadata then full data
uv_obj = UVData()
uv_obj.read_uvfits(filename, metadata_only=True)
uv_obj.read_uvfits_data(filename)
del(uv_obj)
# now test details with astropy
hdu_list = fits.open(filename, memmap=True)
vis_hdu = hdu_list[0]
# only read in times, then uvws, then visibilities
time0_array = vis_hdu.data.par('date')
uvw_array = (np.array(np.stack((vis_hdu.data.par('UU'),
vis_hdu.data.par('VV'),
vis_hdu.data.par('WW')))) * const.c.to('m/s').value).T
if vis_hdu.header['NAXIS'] == 7:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, :, 1])
else:
data_array = (vis_hdu.data.data[:, 0, 0, :, :, 0] +
1j * vis_hdu.data.data[:, 0, 0, :, :, 1])
data_array = data_array[:, np.newaxis, :, :]
# test for releasing resources
del(time0_array)
del(uvw_array)
del(data_array)
# release file handles
del(vis_hdu)
del(hdu_list)
del(filename)
return
if __name__ == '__main__':
read_uvfits()
|
|
4c60f8f643fe05b69ca475242d8c46b02697d5d4
|
examples/howto/type_chain.py
|
examples/howto/type_chain.py
|
from thinc.api import chain, ReLu, MaxPool, Softmax, chain
# This example should be run with mypy. This is an example of type-level checking
# for network validity.
#
# We first define an invalid network.
# It's invalid because MaxPool expects Floats3d as input, while ReLu produces
# Floats2d as output. chain has type-logic to verify input and output types
# line up.
#
# You should see an error,
# examples/howto/type_chain.py:10: error: Cannot infer type argument 2 of "chain"
bad_model = chain(ReLu(10), MaxPool(), Softmax())
# Now let's try it with a network that does work, just to be sure.
good_model = chain(ReLu(10), ReLu(10), Softmax())
# Finally we can reveal_type on the good model, to see what it thinks.
reveal_type(good_model)
|
Add example for type-checking chain
|
Add example for type-checking chain
|
Python
|
mit
|
spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc
|
Add example for type-checking chain
|
from thinc.api import chain, ReLu, MaxPool, Softmax, chain
# This example should be run with mypy. This is an example of type-level checking
# for network validity.
#
# We first define an invalid network.
# It's invalid because MaxPool expects Floats3d as input, while ReLu produces
# Floats2d as output. chain has type-logic to verify input and output types
# line up.
#
# You should see an error,
# examples/howto/type_chain.py:10: error: Cannot infer type argument 2 of "chain"
bad_model = chain(ReLu(10), MaxPool(), Softmax())
# Now let's try it with a network that does work, just to be sure.
good_model = chain(ReLu(10), ReLu(10), Softmax())
# Finally we can reveal_type on the good model, to see what it thinks.
reveal_type(good_model)
|
<commit_before><commit_msg>Add example for type-checking chain<commit_after>
|
from thinc.api import chain, ReLu, MaxPool, Softmax, chain
# This example should be run with mypy. This is an example of type-level checking
# for network validity.
#
# We first define an invalid network.
# It's invalid because MaxPool expects Floats3d as input, while ReLu produces
# Floats2d as output. chain has type-logic to verify input and output types
# line up.
#
# You should see an error,
# examples/howto/type_chain.py:10: error: Cannot infer type argument 2 of "chain"
bad_model = chain(ReLu(10), MaxPool(), Softmax())
# Now let's try it with a network that does work, just to be sure.
good_model = chain(ReLu(10), ReLu(10), Softmax())
# Finally we can reveal_type on the good model, to see what it thinks.
reveal_type(good_model)
|
Add example for type-checking chainfrom thinc.api import chain, ReLu, MaxPool, Softmax, chain
# This example should be run with mypy. This is an example of type-level checking
# for network validity.
#
# We first define an invalid network.
# It's invalid because MaxPool expects Floats3d as input, while ReLu produces
# Floats2d as output. chain has type-logic to verify input and output types
# line up.
#
# You should see an error,
# examples/howto/type_chain.py:10: error: Cannot infer type argument 2 of "chain"
bad_model = chain(ReLu(10), MaxPool(), Softmax())
# Now let's try it with a network that does work, just to be sure.
good_model = chain(ReLu(10), ReLu(10), Softmax())
# Finally we can reveal_type on the good model, to see what it thinks.
reveal_type(good_model)
|
<commit_before><commit_msg>Add example for type-checking chain<commit_after>from thinc.api import chain, ReLu, MaxPool, Softmax, chain
# This example should be run with mypy. This is an example of type-level checking
# for network validity.
#
# We first define an invalid network.
# It's invalid because MaxPool expects Floats3d as input, while ReLu produces
# Floats2d as output. chain has type-logic to verify input and output types
# line up.
#
# You should see an error,
# examples/howto/type_chain.py:10: error: Cannot infer type argument 2 of "chain"
bad_model = chain(ReLu(10), MaxPool(), Softmax())
# Now let's try it with a network that does work, just to be sure.
good_model = chain(ReLu(10), ReLu(10), Softmax())
# Finally we can reveal_type on the good model, to see what it thinks.
reveal_type(good_model)
|
|
c4ee87fa4398eca3193331888086cb437436722e
|
test/hil_client_test.py
|
test/hil_client_test.py
|
"""
General info about these tests
The tests assume that the nodes are in the <from_project> which is set to be the
"slurm" project, since that is what we are testing here.
If all tests pass successfully, then nodes are back in their original state.
Class TestHILReserve moves nodes out of the slurm project and into the free pool;
and TestHILRelease puts nodes back into the slurm project from the free pool
run the tests like this
py.test <path to testfile>
py.test hil_client_test
"""
import inspect
import sys
import pytest
import requests
from os.path import realpath, dirname, isfile, join
import uuid
libdir = realpath(join(dirname(inspect.getfile(inspect.currentframe())), '../common'))
sys.path.append(libdir)
import hil_slurm_client
# Some constants useful for tests
nodelist = ['slurm-compute1', 'slurm-compute2', 'slurm-compute3']
hil_client = hil_slurm_client.hil_init()
to_project = 'slurm'
from_project = 'slurm'
bad_hil_client = hil_slurm_client.hil_client_connect('http://127.3.2.1',
'baduser', 'badpassword')
class TestHILReserve:
"""Tests various hil_reserve cases"""
def test_hil_reserve_success(self):
"""test the regular success scenario"""
# should raise an error if <from_project> doesn't add up.
with pytest.raises(hil_slurm_client.ProjectMismatchError):
random_project = str(uuid.uuid4())
hil_slurm_client.hil_reserve_nodes(nodelist, random_project, hil_client)
# should run without any errors
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, hil_client)
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, bad_hil_client)
class TestHILRelease:
"""Test various hil_release cases"""
def test_hil_release(self):
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_free_nodes(nodelist, to_project, bad_hil_client)
# calling it with a functioning hil_client should work
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
# At this point, nodes are already owned by the <to_project>
        # calling it again should have no effect.
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
|
Add some tests for hil_client
|
Add some tests for hil_client
|
Python
|
mit
|
mghpcc-projects/user_level_slurm_reservations,mghpcc-projects/user_level_slurm_reservations
|
Add some tests for hil_client
|
"""
General info about these tests
The tests assume that the nodes are in the <from_project> which is set to be the
"slurm" project, since that is what we are testing here.
If all tests pass successfully, then nodes are back in their original state.
Class TestHILReserve moves nodes out of the slurm project and into the free pool;
and TestHILRelease puts nodes back into the slurm project from the free pool
run the tests like this
py.test <path to testfile>
py.test hil_client_test
"""
import inspect
import sys
import pytest
import requests
from os.path import realpath, dirname, isfile, join
import uuid
libdir = realpath(join(dirname(inspect.getfile(inspect.currentframe())), '../common'))
sys.path.append(libdir)
import hil_slurm_client
# Some constants useful for tests
nodelist = ['slurm-compute1', 'slurm-compute2', 'slurm-compute3']
hil_client = hil_slurm_client.hil_init()
to_project = 'slurm'
from_project = 'slurm'
bad_hil_client = hil_slurm_client.hil_client_connect('http://127.3.2.1',
'baduser', 'badpassword')
class TestHILReserve:
"""Tests various hil_reserve cases"""
def test_hil_reserve_success(self):
"""test the regular success scenario"""
# should raise an error if <from_project> doesn't add up.
with pytest.raises(hil_slurm_client.ProjectMismatchError):
random_project = str(uuid.uuid4())
hil_slurm_client.hil_reserve_nodes(nodelist, random_project, hil_client)
# should run without any errors
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, hil_client)
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, bad_hil_client)
class TestHILRelease:
"""Test various hil_release cases"""
def test_hil_release(self):
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_free_nodes(nodelist, to_project, bad_hil_client)
# calling it with a functioning hil_client should work
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
# At this point, nodes are already owned by the <to_project>
        # calling it again should have no effect.
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
|
<commit_before><commit_msg>Add some tests for hil_client<commit_after>
|
"""
General info about these tests
The tests assume that the nodes are in the <from_project> which is set to be the
"slurm" project, since that is what we are testing here.
If all tests pass successfully, then nodes are back in their original state.
Class TestHILReserve moves nodes out of the slurm project and into the free pool;
and TestHILRelease puts nodes back into the slurm project from the free pool
run the tests like this
py.test <path to testfile>
py.test hil_client_test
"""
import inspect
import sys
import pytest
import requests
from os.path import realpath, dirname, isfile, join
import uuid
libdir = realpath(join(dirname(inspect.getfile(inspect.currentframe())), '../common'))
sys.path.append(libdir)
import hil_slurm_client
# Some constants useful for tests
nodelist = ['slurm-compute1', 'slurm-compute2', 'slurm-compute3']
hil_client = hil_slurm_client.hil_init()
to_project = 'slurm'
from_project = 'slurm'
bad_hil_client = hil_slurm_client.hil_client_connect('http://127.3.2.1',
'baduser', 'badpassword')
class TestHILReserve:
"""Tests various hil_reserve cases"""
def test_hil_reserve_success(self):
"""test the regular success scenario"""
# should raise an error if <from_project> doesn't add up.
with pytest.raises(hil_slurm_client.ProjectMismatchError):
random_project = str(uuid.uuid4())
hil_slurm_client.hil_reserve_nodes(nodelist, random_project, hil_client)
# should run without any errors
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, hil_client)
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, bad_hil_client)
class TestHILRelease:
"""Test various hil_release cases"""
def test_hil_release(self):
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_free_nodes(nodelist, to_project, bad_hil_client)
# calling it with a functioning hil_client should work
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
# At this point, nodes are already owned by the <to_project>
        # calling it again should have no effect.
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
|
Add some tests for hil_client"""
General info about these tests
The tests assume that the nodes are in the <from_project> which is set to be the
"slurm" project, since that is what we are testing here.
If all tests pass successfully, then nodes are back in their original state.
Class TestHILReserve moves nodes out of the slurm project and into the free pool;
and TestHILRelease puts nodes back into the slurm project from the free pool
run the tests like this
py.test <path to testfile>
py.test hil_client_test
"""
import inspect
import sys
import pytest
import requests
from os.path import realpath, dirname, isfile, join
import uuid
libdir = realpath(join(dirname(inspect.getfile(inspect.currentframe())), '../common'))
sys.path.append(libdir)
import hil_slurm_client
# Some constants useful for tests
nodelist = ['slurm-compute1', 'slurm-compute2', 'slurm-compute3']
hil_client = hil_slurm_client.hil_init()
to_project = 'slurm'
from_project = 'slurm'
bad_hil_client = hil_slurm_client.hil_client_connect('http://127.3.2.1',
'baduser', 'badpassword')
class TestHILReserve:
"""Tests various hil_reserve cases"""
def test_hil_reserve_success(self):
"""test the regular success scenario"""
# should raise an error if <from_project> doesn't add up.
with pytest.raises(hil_slurm_client.ProjectMismatchError):
random_project = str(uuid.uuid4())
hil_slurm_client.hil_reserve_nodes(nodelist, random_project, hil_client)
# should run without any errors
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, hil_client)
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, bad_hil_client)
class TestHILRelease:
"""Test various hil_release cases"""
def test_hil_release(self):
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_free_nodes(nodelist, to_project, bad_hil_client)
# calling it with a functioning hil_client should work
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
# At this point, nodes are already owned by the <to_project>
        # calling it again should have no effect.
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
|
<commit_before><commit_msg>Add some tests for hil_client<commit_after>"""
General info about these tests
The tests assume that the nodes are in the <from_project> which is set to be the
"slurm" project, since that is what we are testing here.
If all tests pass successfully, then nodes are back in their original state.
Class TestHILReserve moves nodes out of the slurm project and into the free pool;
and TestHILRelease puts nodes back into the slurm project from the free pool
run the tests like this
py.test <path to testfile>
py.test hil_client_test
"""
import inspect
import sys
import pytest
import requests
from os.path import realpath, dirname, isfile, join
import uuid
libdir = realpath(join(dirname(inspect.getfile(inspect.currentframe())), '../common'))
sys.path.append(libdir)
import hil_slurm_client
# Some constants useful for tests
nodelist = ['slurm-compute1', 'slurm-compute2', 'slurm-compute3']
hil_client = hil_slurm_client.hil_init()
to_project = 'slurm'
from_project = 'slurm'
bad_hil_client = hil_slurm_client.hil_client_connect('http://127.3.2.1',
'baduser', 'badpassword')
class TestHILReserve:
"""Tests various hil_reserve cases"""
def test_hil_reserve_success(self):
"""test the regular success scenario"""
# should raise an error if <from_project> doesn't add up.
with pytest.raises(hil_slurm_client.ProjectMismatchError):
random_project = str(uuid.uuid4())
hil_slurm_client.hil_reserve_nodes(nodelist, random_project, hil_client)
# should run without any errors
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, hil_client)
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_reserve_nodes(nodelist, from_project, bad_hil_client)
class TestHILRelease:
"""Test various hil_release cases"""
def test_hil_release(self):
# should raise error if a bad hil_client is passed
with pytest.raises(requests.ConnectionError):
hil_slurm_client.hil_free_nodes(nodelist, to_project, bad_hil_client)
# calling it with a functioning hil_client should work
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
# At this point, nodes are already owned by the <to_project>
        # calling it again should have no effect.
hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)
|
|
15eb41ba9ac22eb2ecc60b82807ca7f333f578b9
|
iatidq/dqusers.py
|
iatidq/dqusers.py
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from iatidq import db
import models
def user(user_id=None):
if user_id:
user = models.User.query.filter_by(id=user_id
).first()
return user
return None
def user_by_username(username=None):
if username:
user = models.User.query.filter_by(username=username
).first()
return user
return None
def addUser(data):
checkU = models.User.query.filter_by(username=data["username"]
).first()
if not checkU:
newU = models.User()
newU.setup(
username = data["username"],
password = app.config["ADMIN_PASSWORD"],
name = data.get('name'),
email_address = data.get('name')
)
db.session.add(newU)
db.session.commit()
return user
return None
|
Add basic methods for accessing user data
|
Add basic methods for accessing user data
|
Python
|
agpl-3.0
|
pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality
|
Add basic methods for accessing user data
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from iatidq import db
import models
def user(user_id=None):
if user_id:
user = models.User.query.filter_by(id=user_id
).first()
return user
return None
def user_by_username(username=None):
if username:
user = models.User.query.filter_by(username=username
).first()
return user
return None
def addUser(data):
checkU = models.User.query.filter_by(username=data["username"]
).first()
if not checkU:
newU = models.User()
newU.setup(
username = data["username"],
password = app.config["ADMIN_PASSWORD"],
name = data.get('name'),
email_address = data.get('name')
)
db.session.add(newU)
db.session.commit()
return user
return None
|
<commit_before><commit_msg>Add basic methods for accessing user data<commit_after>
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from iatidq import db
import models
def user(user_id=None):
if user_id:
user = models.User.query.filter_by(id=user_id
).first()
return user
return None
def user_by_username(username=None):
if username:
user = models.User.query.filter_by(username=username
).first()
return user
return None
def addUser(data):
checkU = models.User.query.filter_by(username=data["username"]
).first()
if not checkU:
newU = models.User()
newU.setup(
username = data["username"],
password = app.config["ADMIN_PASSWORD"],
name = data.get('name'),
email_address = data.get('name')
)
db.session.add(newU)
db.session.commit()
return user
return None
|
Add basic methods for accessing user data
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from iatidq import db
import models
def user(user_id=None):
if user_id:
user = models.User.query.filter_by(id=user_id
).first()
return user
return None
def user_by_username(username=None):
if username:
user = models.User.query.filter_by(username=username
).first()
return user
return None
def addUser(data):
checkU = models.User.query.filter_by(username=data["username"]
).first()
if not checkU:
newU = models.User()
newU.setup(
username = data["username"],
password = app.config["ADMIN_PASSWORD"],
name = data.get('name'),
email_address = data.get('name')
)
db.session.add(newU)
db.session.commit()
return user
return None
|
<commit_before><commit_msg>Add basic methods for accessing user data<commit_after>
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from iatidq import db
import models
def user(user_id=None):
if user_id:
user = models.User.query.filter_by(id=user_id
).first()
return user
return None
def user_by_username(username=None):
if username:
user = models.User.query.filter_by(username=username
).first()
return user
return None
def addUser(data):
checkU = models.User.query.filter_by(username=data["username"]
).first()
if not checkU:
newU = models.User()
newU.setup(
username = data["username"],
password = app.config["ADMIN_PASSWORD"],
name = data.get('name'),
email_address = data.get('name')
)
db.session.add(newU)
db.session.commit()
return user
return None
|
|
287cd795c92a86ee16a623230d0c59732a2f767d
|
examples/vtk-unstructured-points.py
|
examples/vtk-unstructured-points.py
|
import numpy as np
from pyvisfile.vtk import (
UnstructuredGrid, DataArray,
AppendedDataXMLGenerator,
VTK_VERTEX, VF_LIST_OF_VECTORS, VF_LIST_OF_COMPONENTS)
n = 5000
points = np.random.randn(n, 3)
data = [
("p", np.random.randn(n)),
("vel", np.random.randn(3, n)),
]
file_name = "points.vtu"
compressor = None
grid = UnstructuredGrid(
(n, DataArray("points", points, vector_format=VF_LIST_OF_VECTORS)),
cells=np.arange(n, dtype=np.uint32),
cell_types=np.asarray([VTK_VERTEX] * n, dtype=np.uint8))
for name, field in data:
grid.add_pointdata(DataArray(name, field,
vector_format=VF_LIST_OF_COMPONENTS))
from os.path import exists
if exists(file_name):
raise RuntimeError("output file '%s' already exists"
% file_name)
outf = open(file_name, "w")
AppendedDataXMLGenerator(compressor)(grid).write(outf)
outf.close()
|
Add demo on how to write unstructured point meshes in Vtk.
|
Add demo on how to write unstructured point meshes in Vtk.
|
Python
|
mit
|
inducer/pyvisfile,inducer/pyvisfile,inducer/pyvisfile
|
Add demo on how to write unstructured point meshes in Vtk.
|
import numpy as np
from pyvisfile.vtk import (
UnstructuredGrid, DataArray,
AppendedDataXMLGenerator,
VTK_VERTEX, VF_LIST_OF_VECTORS, VF_LIST_OF_COMPONENTS)
n = 5000
points = np.random.randn(n, 3)
data = [
("p", np.random.randn(n)),
("vel", np.random.randn(3, n)),
]
file_name = "points.vtu"
compressor = None
grid = UnstructuredGrid(
(n, DataArray("points", points, vector_format=VF_LIST_OF_VECTORS)),
cells=np.arange(n, dtype=np.uint32),
cell_types=np.asarray([VTK_VERTEX] * n, dtype=np.uint8))
for name, field in data:
grid.add_pointdata(DataArray(name, field,
vector_format=VF_LIST_OF_COMPONENTS))
from os.path import exists
if exists(file_name):
raise RuntimeError("output file '%s' already exists"
% file_name)
outf = open(file_name, "w")
AppendedDataXMLGenerator(compressor)(grid).write(outf)
outf.close()
|
<commit_before><commit_msg>Add demo on how to write unstructured point meshes in Vtk.<commit_after>
|
import numpy as np
from pyvisfile.vtk import (
UnstructuredGrid, DataArray,
AppendedDataXMLGenerator,
VTK_VERTEX, VF_LIST_OF_VECTORS, VF_LIST_OF_COMPONENTS)
n = 5000
points = np.random.randn(n, 3)
data = [
("p", np.random.randn(n)),
("vel", np.random.randn(3, n)),
]
file_name = "points.vtu"
compressor = None
grid = UnstructuredGrid(
(n, DataArray("points", points, vector_format=VF_LIST_OF_VECTORS)),
cells=np.arange(n, dtype=np.uint32),
cell_types=np.asarray([VTK_VERTEX] * n, dtype=np.uint8))
for name, field in data:
grid.add_pointdata(DataArray(name, field,
vector_format=VF_LIST_OF_COMPONENTS))
from os.path import exists
if exists(file_name):
raise RuntimeError("output file '%s' already exists"
% file_name)
outf = open(file_name, "w")
AppendedDataXMLGenerator(compressor)(grid).write(outf)
outf.close()
|
Add demo on how to write unstructured point meshes in Vtk.import numpy as np
from pyvisfile.vtk import (
UnstructuredGrid, DataArray,
AppendedDataXMLGenerator,
VTK_VERTEX, VF_LIST_OF_VECTORS, VF_LIST_OF_COMPONENTS)
n = 5000
points = np.random.randn(n, 3)
data = [
("p", np.random.randn(n)),
("vel", np.random.randn(3, n)),
]
file_name = "points.vtu"
compressor = None
grid = UnstructuredGrid(
(n, DataArray("points", points, vector_format=VF_LIST_OF_VECTORS)),
cells=np.arange(n, dtype=np.uint32),
cell_types=np.asarray([VTK_VERTEX] * n, dtype=np.uint8))
for name, field in data:
grid.add_pointdata(DataArray(name, field,
vector_format=VF_LIST_OF_COMPONENTS))
from os.path import exists
if exists(file_name):
raise RuntimeError("output file '%s' already exists"
% file_name)
outf = open(file_name, "w")
AppendedDataXMLGenerator(compressor)(grid).write(outf)
outf.close()
|
<commit_before><commit_msg>Add demo on how to write unstructured point meshes in Vtk.<commit_after>import numpy as np
from pyvisfile.vtk import (
UnstructuredGrid, DataArray,
AppendedDataXMLGenerator,
VTK_VERTEX, VF_LIST_OF_VECTORS, VF_LIST_OF_COMPONENTS)
n = 5000
points = np.random.randn(n, 3)
data = [
("p", np.random.randn(n)),
("vel", np.random.randn(3, n)),
]
file_name = "points.vtu"
compressor = None
grid = UnstructuredGrid(
(n, DataArray("points", points, vector_format=VF_LIST_OF_VECTORS)),
cells=np.arange(n, dtype=np.uint32),
cell_types=np.asarray([VTK_VERTEX] * n, dtype=np.uint8))
for name, field in data:
grid.add_pointdata(DataArray(name, field,
vector_format=VF_LIST_OF_COMPONENTS))
from os.path import exists
if exists(file_name):
raise RuntimeError("output file '%s' already exists"
% file_name)
outf = open(file_name, "w")
AppendedDataXMLGenerator(compressor)(grid).write(outf)
outf.close()
|
|
11111351f67afd3dc8ee2ec904a9cea595d68fb3
|
DilipadTopicModelling/experiment_calculate_perplexity.py
|
DilipadTopicModelling/experiment_calculate_perplexity.py
|
import pandas as pd
import logging
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir))
#corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
# topicDict='{}/topicDict.dict'.format(data_dir),
# opinionDict='{}/opinionDict.dict'.format(data_dir))
nIter = 200
beta = 0.02
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
nTopics = range(20, nIter+1, 20)
nPerplexity = range(0, nIter+1, 10)
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for n in nTopics:
# load sampler
sampler = GibbsSampler(corpus, nTopics=n, nIter=nIter, alpha=(50.0/n),
beta=beta, beta_o=beta,
out_dir=out_dir.format(n))
sampler._initialize()
sampler.run()
for s in nPerplexity:
tw_perp, ow_perp = sampler.perplexity(index=s)
topic_perp.set_value(s, n, tw_perp)
opinion_perp.set_value(s, n, ow_perp)
logger.info('nTopics: {}, nPerplexity: {}, topic perplexity: {}, '
'opinion perplexity: {}'.format(n, s, tw_perp, ow_perp))
topic_perp.to_csv(out_dir.format('perplexity_topic.csv'))
opinion_perp.to_csv(out_dir.format('perplexity_opinion.csv'))
|
Add script to calculate perplexity results
|
Add script to calculate perplexity results
Added a script that calculates perplexity at intervals for different
configurations of nTopics and saves the results to csv
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script to calculate perplexity results
Added a script that calculates perplexity at intervals for different
configurations of nTopics and saves the results to csv
|
import pandas as pd
import logging
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir))
#corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
# topicDict='{}/topicDict.dict'.format(data_dir),
# opinionDict='{}/opinionDict.dict'.format(data_dir))
nIter = 200
beta = 0.02
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
nTopics = range(20, nIter+1, 20)
nPerplexity = range(0, nIter+1, 10)
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for n in nTopics:
# load sampler
sampler = GibbsSampler(corpus, nTopics=n, nIter=nIter, alpha=(50.0/n),
beta=beta, beta_o=beta,
out_dir=out_dir.format(n))
sampler._initialize()
sampler.run()
for s in nPerplexity:
tw_perp, ow_perp = sampler.perplexity(index=s)
topic_perp.set_value(s, n, tw_perp)
opinion_perp.set_value(s, n, ow_perp)
logger.info('nTopics: {}, nPerplexity: {}, topic perplexity: {}, '
'opinion perplexity: {}'.format(n, s, tw_perp, ow_perp))
topic_perp.to_csv(out_dir.format('perplexity_topic.csv'))
opinion_perp.to_csv(out_dir.format('perplexity_opinion.csv'))
|
<commit_before><commit_msg>Add script to calculate perplexity results
Added a script that calculates perplexity at intervals for different
configurations of nTopics and saves the results to csv<commit_after>
|
import pandas as pd
import logging
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir))
#corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
# topicDict='{}/topicDict.dict'.format(data_dir),
# opinionDict='{}/opinionDict.dict'.format(data_dir))
nIter = 200
beta = 0.02
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
nTopics = range(20, nIter+1, 20)
nPerplexity = range(0, nIter+1, 10)
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for n in nTopics:
# load sampler
sampler = GibbsSampler(corpus, nTopics=n, nIter=nIter, alpha=(50.0/n),
beta=beta, beta_o=beta,
out_dir=out_dir.format(n))
sampler._initialize()
sampler.run()
for s in nPerplexity:
tw_perp, ow_perp = sampler.perplexity(index=s)
topic_perp.set_value(s, n, tw_perp)
opinion_perp.set_value(s, n, ow_perp)
logger.info('nTopics: {}, nPerplexity: {}, topic perplexity: {}, '
'opinion perplexity: {}'.format(n, s, tw_perp, ow_perp))
topic_perp.to_csv(out_dir.format('perplexity_topic.csv'))
opinion_perp.to_csv(out_dir.format('perplexity_opinion.csv'))
|
Add script to calculate perplexity results
Added a script that calculates perplexity at intervals for different
configurations of nTopics and saves the results to csvimport pandas as pd
import logging
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir))
#corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
# topicDict='{}/topicDict.dict'.format(data_dir),
# opinionDict='{}/opinionDict.dict'.format(data_dir))
nIter = 200
beta = 0.02
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
nTopics = range(20, nIter+1, 20)
nPerplexity = range(0, nIter+1, 10)
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for n in nTopics:
# load sampler
sampler = GibbsSampler(corpus, nTopics=n, nIter=nIter, alpha=(50.0/n),
beta=beta, beta_o=beta,
out_dir=out_dir.format(n))
sampler._initialize()
sampler.run()
for s in nPerplexity:
tw_perp, ow_perp = sampler.perplexity(index=s)
topic_perp.set_value(s, n, tw_perp)
opinion_perp.set_value(s, n, ow_perp)
logger.info('nTopics: {}, nPerplexity: {}, topic perplexity: {}, '
'opinion perplexity: {}'.format(n, s, tw_perp, ow_perp))
topic_perp.to_csv(out_dir.format('perplexity_topic.csv'))
opinion_perp.to_csv(out_dir.format('perplexity_opinion.csv'))
|
<commit_before><commit_msg>Add script to calculate perplexity results
Added a script that calculates perplexity at intervals for different
configurations of nTopics and saves the results to csv<commit_after>import pandas as pd
import logging
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir))
#corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
# topicDict='{}/topicDict.dict'.format(data_dir),
# opinionDict='{}/opinionDict.dict'.format(data_dir))
nIter = 200
beta = 0.02
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
nTopics = range(20, nIter+1, 20)
nPerplexity = range(0, nIter+1, 10)
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for n in nTopics:
# load sampler
sampler = GibbsSampler(corpus, nTopics=n, nIter=nIter, alpha=(50.0/n),
beta=beta, beta_o=beta,
out_dir=out_dir.format(n))
sampler._initialize()
sampler.run()
for s in nPerplexity:
tw_perp, ow_perp = sampler.perplexity(index=s)
topic_perp.set_value(s, n, tw_perp)
opinion_perp.set_value(s, n, ow_perp)
logger.info('nTopics: {}, nPerplexity: {}, topic perplexity: {}, '
'opinion perplexity: {}'.format(n, s, tw_perp, ow_perp))
topic_perp.to_csv(out_dir.format('perplexity_topic.csv'))
opinion_perp.to_csv(out_dir.format('perplexity_opinion.csv'))
|
|
71a55a1252ef87629f10e48c1041416c34742ea7
|
modules/juliet_input.py
|
modules/juliet_input.py
|
from threading import Thread
class Juliet_Input (Thread):
def __init(self):
Thread.__init(self)
def run(self):
while True:
char = raw_input()
if char == 'q':
break
|
Add input handling for ssh connections
|
Add input handling for ssh connections
|
Python
|
bsd-2-clause
|
halfbro/juliet
|
Add input handling for ssh connections
|
from threading import Thread
class Juliet_Input (Thread):
def __init(self):
Thread.__init(self)
def run(self):
while True:
char = raw_input()
if char == 'q':
break
|
<commit_before><commit_msg>Add input handling for ssh connections<commit_after>
|
from threading import Thread
class Juliet_Input (Thread):
def __init(self):
Thread.__init(self)
def run(self):
while True:
char = raw_input()
if char == 'q':
break
|
Add input handling for ssh connectionsfrom threading import Thread
class Juliet_Input (Thread):
def __init(self):
Thread.__init(self)
def run(self):
while True:
char = raw_input()
if char == 'q':
break
|
<commit_before><commit_msg>Add input handling for ssh connections<commit_after>from threading import Thread
class Juliet_Input (Thread):
def __init(self):
Thread.__init(self)
def run(self):
while True:
char = raw_input()
if char == 'q':
break
|
|
0115d088061595fe6c6f8589d0599d1b8e970813
|
scripts/lwtnn-build-dummy-inputs.py
|
scripts/lwtnn-build-dummy-inputs.py
|
#!/usr/bin/env python3
"""Generate fake NN files to test the lightweight classes"""
import argparse
import json
import h5py
import numpy as np
def _run():
args = _get_args()
_build_keras_arch("arch.json")
_build_keras_inputs_file("inputs.json")
_build_keras_weights("weights.h5", verbose=args.verbose)
def _get_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args()
def _build_keras_arch(name):
arch = {
'layers': [
{'activation': 'relu', 'name': 'Dense'}
]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(arch, indent=2))
def _build_keras_inputs_file(name):
def build_input(num):
return {"name": "in{}".format(num), "offset": 0.0, "scale": 1.0}
top = {
"inputs": [build_input(x) for x in range(1,5)],
"class_labels": ["out{}".format(x) for x in range(1,5)]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(top, indent=2))
def _build_keras_weights(name, verbose):
half_swap = np.zeros((4,4))
half_swap[0,3] = 1.0
half_swap[1,2] = 1.0
if verbose:
print(half_swap)
bias = np.zeros(4)
with h5py.File(name, 'w') as h5_file:
layer0 = h5_file.create_group("layer_0")
layer0.create_dataset("param_0", data=half_swap)
layer0.create_dataset("param_1", data=bias)
if __name__ == "__main__":
_run()
|
Add dummy Keras inputs builder
|
Add dummy Keras inputs builder
|
Python
|
mit
|
lwtnn/lwtnn,jwsmithers/lwtnn,jwsmithers/lwtnn,jwsmithers/lwtnn,lwtnn/lwtnn,lwtnn/lwtnn
|
Add dummy Keras inputs builder
|
#!/usr/bin/env python3
"""Generate fake NN files to test the lightweight classes"""
import argparse
import json
import h5py
import numpy as np
def _run():
args = _get_args()
_build_keras_arch("arch.json")
_build_keras_inputs_file("inputs.json")
_build_keras_weights("weights.h5", verbose=args.verbose)
def _get_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args()
def _build_keras_arch(name):
arch = {
'layers': [
{'activation': 'relu', 'name': 'Dense'}
]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(arch, indent=2))
def _build_keras_inputs_file(name):
def build_input(num):
return {"name": "in{}".format(num), "offset": 0.0, "scale": 1.0}
top = {
"inputs": [build_input(x) for x in range(1,5)],
"class_labels": ["out{}".format(x) for x in range(1,5)]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(top, indent=2))
def _build_keras_weights(name, verbose):
half_swap = np.zeros((4,4))
half_swap[0,3] = 1.0
half_swap[1,2] = 1.0
if verbose:
print(half_swap)
bias = np.zeros(4)
with h5py.File(name, 'w') as h5_file:
layer0 = h5_file.create_group("layer_0")
layer0.create_dataset("param_0", data=half_swap)
layer0.create_dataset("param_1", data=bias)
if __name__ == "__main__":
_run()
|
<commit_before><commit_msg>Add dummy Keras inputs builder<commit_after>
|
#!/usr/bin/env python3
"""Generate fake NN files to test the lightweight classes"""
import argparse
import json
import h5py
import numpy as np
def _run():
args = _get_args()
_build_keras_arch("arch.json")
_build_keras_inputs_file("inputs.json")
_build_keras_weights("weights.h5", verbose=args.verbose)
def _get_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args()
def _build_keras_arch(name):
arch = {
'layers': [
{'activation': 'relu', 'name': 'Dense'}
]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(arch, indent=2))
def _build_keras_inputs_file(name):
def build_input(num):
return {"name": "in{}".format(num), "offset": 0.0, "scale": 1.0}
top = {
"inputs": [build_input(x) for x in range(1,5)],
"class_labels": ["out{}".format(x) for x in range(1,5)]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(top, indent=2))
def _build_keras_weights(name, verbose):
half_swap = np.zeros((4,4))
half_swap[0,3] = 1.0
half_swap[1,2] = 1.0
if verbose:
print(half_swap)
bias = np.zeros(4)
with h5py.File(name, 'w') as h5_file:
layer0 = h5_file.create_group("layer_0")
layer0.create_dataset("param_0", data=half_swap)
layer0.create_dataset("param_1", data=bias)
if __name__ == "__main__":
_run()
|
Add dummy Keras inputs builder#!/usr/bin/env python3
"""Generate fake NN files to test the lightweight classes"""
import argparse
import json
import h5py
import numpy as np
def _run():
args = _get_args()
_build_keras_arch("arch.json")
_build_keras_inputs_file("inputs.json")
_build_keras_weights("weights.h5", verbose=args.verbose)
def _get_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args()
def _build_keras_arch(name):
arch = {
'layers': [
{'activation': 'relu', 'name': 'Dense'}
]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(arch, indent=2))
def _build_keras_inputs_file(name):
def build_input(num):
return {"name": "in{}".format(num), "offset": 0.0, "scale": 1.0}
top = {
"inputs": [build_input(x) for x in range(1,5)],
"class_labels": ["out{}".format(x) for x in range(1,5)]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(top, indent=2))
def _build_keras_weights(name, verbose):
half_swap = np.zeros((4,4))
half_swap[0,3] = 1.0
half_swap[1,2] = 1.0
if verbose:
print(half_swap)
bias = np.zeros(4)
with h5py.File(name, 'w') as h5_file:
layer0 = h5_file.create_group("layer_0")
layer0.create_dataset("param_0", data=half_swap)
layer0.create_dataset("param_1", data=bias)
if __name__ == "__main__":
_run()
|
<commit_before><commit_msg>Add dummy Keras inputs builder<commit_after>#!/usr/bin/env python3
"""Generate fake NN files to test the lightweight classes"""
import argparse
import json
import h5py
import numpy as np
def _run():
args = _get_args()
_build_keras_arch("arch.json")
_build_keras_inputs_file("inputs.json")
_build_keras_weights("weights.h5", verbose=args.verbose)
def _get_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
return parser.parse_args()
def _build_keras_arch(name):
arch = {
'layers': [
{'activation': 'relu', 'name': 'Dense'}
]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(arch, indent=2))
def _build_keras_inputs_file(name):
def build_input(num):
return {"name": "in{}".format(num), "offset": 0.0, "scale": 1.0}
top = {
"inputs": [build_input(x) for x in range(1,5)],
"class_labels": ["out{}".format(x) for x in range(1,5)]
}
with open(name, 'w') as out_file:
out_file.write(json.dumps(top, indent=2))
def _build_keras_weights(name, verbose):
half_swap = np.zeros((4,4))
half_swap[0,3] = 1.0
half_swap[1,2] = 1.0
if verbose:
print(half_swap)
bias = np.zeros(4)
with h5py.File(name, 'w') as h5_file:
layer0 = h5_file.create_group("layer_0")
layer0.create_dataset("param_0", data=half_swap)
layer0.create_dataset("param_1", data=bias)
if __name__ == "__main__":
_run()
|
|
cb98b4a1580e4976de375722012483bf51ef9254
|
scripts/get_mendeley_papers.py
|
scripts/get_mendeley_papers.py
|
###
# Copyright 2015-2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from mendeley import Mendeley
def test():
client_id = 9526
client_secret = "AmIvWP7FRxeLHX7n"
redirect_uri = "https://isb-cgc.appspot.com/"
# These values should match the ones supplied when registering your application.
mendeley = Mendeley(client_id, client_secret=client_secret, redirect_uri=redirect_uri)
auth = mendeley.start_implicit_grant_flow()
# The user needs to visit this URL, and log in to Mendeley.
login_url = auth.get_login_url()
print("Go to this link to log in: \n" + login_url)
# After logging in, the user will be redirected to a URL, auth_response.
auth_response = input("Copy the redirect link here: \n")
auth_response = auth_response.rstrip()
# print("** Response is: " + auth_response)
session = auth.authenticate(auth_response)
# print(session.token['access_token'])
# List all groups I have access to
groups = session.groups.iter()
i = 1
for g in groups:
print("[{}] {}".format(i, g.name))
i = i + 1
    # Let's choose a group
selected_index = int(input('Select group to get paper from: '))
i = 1
group_id = ''
groups = session.groups.iter()
for g in groups:
if i == selected_index:
group_id = g.id
break
i = i + 1
if group_id == '':
quit()
# Get all the documents in the group
target_group = session.groups.get(group_id)
docs = target_group.documents.iter()
for d in docs:
print(d.title)
print("something")
if __name__ == "__main__":
test()
|
Add interactive script to get papers from Mendeley API
|
Add interactive script to get papers from Mendeley API
|
Python
|
apache-2.0
|
isb-cgc/ISB-CGC-Webapp,isb-cgc/ISB-CGC-Webapp,isb-cgc/ISB-CGC-Webapp,isb-cgc/ISB-CGC-Webapp
|
Add interactive script to get papers from Mendeley API
|
###
# Copyright 2015-2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from mendeley import Mendeley
def test():
client_id = 9526
client_secret = "AmIvWP7FRxeLHX7n"
redirect_uri = "https://isb-cgc.appspot.com/"
# These values should match the ones supplied when registering your application.
mendeley = Mendeley(client_id, client_secret=client_secret, redirect_uri=redirect_uri)
auth = mendeley.start_implicit_grant_flow()
# The user needs to visit this URL, and log in to Mendeley.
login_url = auth.get_login_url()
print("Go to this link to log in: \n" + login_url)
# After logging in, the user will be redirected to a URL, auth_response.
auth_response = input("Copy the redirect link here: \n")
auth_response = auth_response.rstrip()
# print("** Response is: " + auth_response)
session = auth.authenticate(auth_response)
# print(session.token['access_token'])
# List all groups I have access to
groups = session.groups.iter()
i = 1
for g in groups:
print("[{}] {}".format(i, g.name))
i = i + 1
    # Let's choose a group
selected_index = int(input('Select group to get paper from: '))
i = 1
group_id = ''
groups = session.groups.iter()
for g in groups:
if i == selected_index:
group_id = g.id
break
i = i + 1
if group_id == '':
quit()
# Get all the documents in the group
target_group = session.groups.get(group_id)
docs = target_group.documents.iter()
for d in docs:
print(d.title)
print("something")
if __name__ == "__main__":
test()
|
<commit_before><commit_msg>Add interactive script to get papers from Mendeley API<commit_after>
|
###
# Copyright 2015-2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from mendeley import Mendeley
def test():
client_id = 9526
client_secret = "AmIvWP7FRxeLHX7n"
redirect_uri = "https://isb-cgc.appspot.com/"
# These values should match the ones supplied when registering your application.
mendeley = Mendeley(client_id, client_secret=client_secret, redirect_uri=redirect_uri)
auth = mendeley.start_implicit_grant_flow()
# The user needs to visit this URL, and log in to Mendeley.
login_url = auth.get_login_url()
print("Go to this link to log in: \n" + login_url)
# After logging in, the user will be redirected to a URL, auth_response.
auth_response = input("Copy the redirect link here: \n")
auth_response = auth_response.rstrip()
# print("** Response is: " + auth_response)
session = auth.authenticate(auth_response)
# print(session.token['access_token'])
# List all groups I have access to
groups = session.groups.iter()
i = 1
for g in groups:
print("[{}] {}".format(i, g.name))
i = i + 1
    # Let's choose a group
selected_index = int(input('Select group to get paper from: '))
i = 1
group_id = ''
groups = session.groups.iter()
for g in groups:
if i == selected_index:
group_id = g.id
break
i = i + 1
if group_id == '':
quit()
# Get all the documents in the group
target_group = session.groups.get(group_id)
docs = target_group.documents.iter()
for d in docs:
print(d.title)
print("something")
if __name__ == "__main__":
test()
|
Add interactive script to get papers from Mendeley API###
# Copyright 2015-2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from mendeley import Mendeley
def test():
client_id = 9526
client_secret = "AmIvWP7FRxeLHX7n"
redirect_uri = "https://isb-cgc.appspot.com/"
# These values should match the ones supplied when registering your application.
mendeley = Mendeley(client_id, client_secret=client_secret, redirect_uri=redirect_uri)
auth = mendeley.start_implicit_grant_flow()
# The user needs to visit this URL, and log in to Mendeley.
login_url = auth.get_login_url()
print("Go to this link to log in: \n" + login_url)
# After logging in, the user will be redirected to a URL, auth_response.
auth_response = input("Copy the redirect link here: \n")
auth_response = auth_response.rstrip()
# print("** Response is: " + auth_response)
session = auth.authenticate(auth_response)
# print(session.token['access_token'])
# List all groups I have access to
groups = session.groups.iter()
i = 1
for g in groups:
print("[{}] {}".format(i, g.name))
i = i + 1
    # Let's choose a group
selected_index = int(input('Select group to get paper from: '))
i = 1
group_id = ''
groups = session.groups.iter()
for g in groups:
if i == selected_index:
group_id = g.id
break
i = i + 1
if group_id == '':
quit()
# Get all the documents in the group
target_group = session.groups.get(group_id)
docs = target_group.documents.iter()
for d in docs:
print(d.title)
print("something")
if __name__ == "__main__":
test()
|
<commit_before><commit_msg>Add interactive script to get papers from Mendeley API<commit_after>###
# Copyright 2015-2020, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from mendeley import Mendeley
def test():
client_id = 9526
client_secret = "AmIvWP7FRxeLHX7n"
redirect_uri = "https://isb-cgc.appspot.com/"
# These values should match the ones supplied when registering your application.
mendeley = Mendeley(client_id, client_secret=client_secret, redirect_uri=redirect_uri)
auth = mendeley.start_implicit_grant_flow()
# The user needs to visit this URL, and log in to Mendeley.
login_url = auth.get_login_url()
print("Go to this link to log in: \n" + login_url)
# After logging in, the user will be redirected to a URL, auth_response.
auth_response = input("Copy the redirect link here: \n")
auth_response = auth_response.rstrip()
# print("** Response is: " + auth_response)
session = auth.authenticate(auth_response)
# print(session.token['access_token'])
# List all groups I have access to
groups = session.groups.iter()
i = 1
for g in groups:
print("[{}] {}".format(i, g.name))
i = i + 1
    # Let's choose a group
selected_index = int(input('Select group to get paper from: '))
i = 1
group_id = ''
groups = session.groups.iter()
for g in groups:
if i == selected_index:
group_id = g.id
break
i = i + 1
if group_id == '':
quit()
# Get all the documents in the group
target_group = session.groups.get(group_id)
docs = target_group.documents.iter()
for d in docs:
print(d.title)
print("something")
if __name__ == "__main__":
test()
|
|
731bc1308e94cdb341511618ba5739f6fd37b0b7
|
regressions/__init__.py
|
regressions/__init__.py
|
# regressions
"""A package which implements various forms of regression."""
import numpy as np
try:
import scipy.linalg as linalg
linalg_source = 'scipy'
except ImportError:
import numpy.linalg as linalg
linalg_source = 'numpy'
class ParameterError(Exception):
"""Parameters passed to a regression routine are unacceptable"""
pass
# Maximum iterations that will be attempted by iterative routines by
# default
DEFAULT_MAX_ITERATIONS = 100
# A default epsilon value used in various places, such as to decide when
# iterations have converged
DEFAULT_EPSILON = 0.01
|
Add a base regressions package
|
Add a base regressions package
This contains only various imports / definitions that will be useful
in the child modules and should be able to be used via
'from .. import *' without issues.
|
Python
|
isc
|
jhumphry/regressions
|
Add a base regressions package
This contains only various imports / definitions that will be useful
in the child modules and should be able to be used via
'from .. import *' without issues.
|
# regressions
"""A package which implements various forms of regression."""
import numpy as np
try:
import scipy.linalg as linalg
linalg_source = 'scipy'
except ImportError:
import numpy.linalg as linalg
linalg_source = 'numpy'
class ParameterError(Exception):
"""Parameters passed to a regression routine are unacceptable"""
pass
# Maximum iterations that will be attempted by iterative routines by
# default
DEFAULT_MAX_ITERATIONS = 100
# A default epsilon value used in various places, such as to decide when
# iterations have converged
DEFAULT_EPSILON = 0.01
|
<commit_before><commit_msg>Add a base regressions package
This contains only various imports / definitions that will be useful
in the child modules and should be able to be used via
'from .. import *' without issues.<commit_after>
|
# regressions
"""A package which implements various forms of regression."""
import numpy as np
try:
import scipy.linalg as linalg
linalg_source = 'scipy'
except ImportError:
import numpy.linalg as linalg
linalg_source = 'numpy'
class ParameterError(Exception):
"""Parameters passed to a regression routine are unacceptable"""
pass
# Maximum iterations that will be attempted by iterative routines by
# default
DEFAULT_MAX_ITERATIONS = 100
# A default epsilon value used in various places, such as to decide when
# iterations have converged
DEFAULT_EPSILON = 0.01
|
Add a base regressions package
This contains only various imports / definitions that will be useful
in the child modules and should be able to be used via
'from .. import *' without issues.# regressions
"""A package which implements various forms of regression."""
import numpy as np
try:
import scipy.linalg as linalg
linalg_source = 'scipy'
except ImportError:
import numpy.linalg as linalg
linalg_source = 'numpy'
class ParameterError(Exception):
"""Parameters passed to a regression routine are unacceptable"""
pass
# Maximum iterations that will be attempted by iterative routines by
# default
DEFAULT_MAX_ITERATIONS = 100
# A default epsilon value used in various places, such as to decide when
# iterations have converged
DEFAULT_EPSILON = 0.01
|
<commit_before><commit_msg>Add a base regressions package
This contains only various imports / definitions that will be useful
in the child modules and should be able to be used via
'from .. import *' without issues.<commit_after># regressions
"""A package which implements various forms of regression."""
import numpy as np
try:
import scipy.linalg as linalg
linalg_source = 'scipy'
except ImportError:
import numpy.linalg as linalg
linalg_source = 'numpy'
class ParameterError(Exception):
"""Parameters passed to a regression routine are unacceptable"""
pass
# Maximum iterations that will be attempted by iterative routines by
# default
DEFAULT_MAX_ITERATIONS = 100
# A default epsilon value used in various places, such as to decide when
# iterations have converged
DEFAULT_EPSILON = 0.01
|
|
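The package above only establishes shared names (np, linalg, ParameterError and the DEFAULT_* constants) for its child modules to star-import. A minimal sketch of such a hypothetical child module follows; it is not part of the commit, and it is written with 'from . import *' since the module would sit directly inside the regressions package.
# regressions/ols.py -- hypothetical child module, not part of the commit above.
# It relies only on the names the package __init__ exposes to a star import.
from . import *

def fit_ols(X, y):
    """Solve the ordinary least squares normal equations with whichever
    linalg backend (scipy or numpy) the package managed to import."""
    if X.shape[0] < X.shape[1]:
        raise ParameterError('Need at least as many observations as variables')
    # Solve (X^T X) beta = X^T y
    return linalg.solve(np.dot(X.T, X), np.dot(X.T, y))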
c6c87e1aafa6b8a4f7929c491398574921417bd4
|
tests/webcam_framerate.py
|
tests/webcam_framerate.py
|
#!/usr/bin/env python
import qrtools, gi, os
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk, Gst
from avocado import Test
from utils import webcam
class WebcamReadQR(Test):
"""
Uses the camera selected by v4l2src by default (/dev/video0) to get the
framerate by creating a pipeline with an fpsdisplaysink and initializing
Gtk main loop. For now, it is tested whether the framerate is 30 or more.
"""
def setUp(self):
self.error = None
#if not os.path.exists('/dev/video0'):
#self.skip("No webcam detected: /dev/video0 cannot be found");
def test(self):
elements = ['fpsdisplaysink video-sink=fakesink text-overlay=false '
'signal-fps-measurements=true']
webcam.create_video_pipeline(self, gst_elements=elements,
v4l2src_args="num-buffers=2000")
bus = self.video_player.get_bus()
bus.connect("fps-measurements", self.on_fps_measurement)
Gtk.main()
if self.error != None:
self.fail("Error: {0}".format(self.error))
if self.fps < 30:
self.fail("Measured fps is below 30, {0}".format(self.fps))
self.log.debug("Measured fps is 30 or more, {0}".format(self.fps))
def on_fps_measurement(self, fpsdisplaysink, fps, droprate, avgfps):
self.fps = avgfps
def on_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
webcam.exit(self)
elif t == Gst.MessageType.ERROR:
webcam.exit(self)
self.error = message.parse_error()
|
Add initial framerate webcam test structure
|
Add initial framerate webcam test structure
|
Python
|
mit
|
daveol/Fedora-Test-Laptop,daveol/Fedora-Test-Laptop
|
Add initial framerate webcam test structure
|
#!/usr/bin/env python
import qrtools, gi, os
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk, Gst
from avocado import Test
from utils import webcam
class WebcamReadQR(Test):
"""
Uses the camera selected by v4l2src by default (/dev/video0) to get the
framerate by creating a pipeline with an fpsdisplaysink and initializing
Gtk main loop. For now, it is tested whether the framerate is 30 or more.
"""
def setUp(self):
self.error = None
#if not os.path.exists('/dev/video0'):
#self.skip("No webcam detected: /dev/video0 cannot be found");
def test(self):
elements = ['fpsdisplaysink video-sink=fakesink text-overlay=false '
'signal-fps-measurements=true']
webcam.create_video_pipeline(self, gst_elements=elements,
v4l2src_args="num-buffers=2000")
bus = self.video_player.get_bus()
bus.connect("fps-measurements", self.on_fps_measurement)
Gtk.main()
if self.error != None:
self.fail("Error: {0}".format(self.error))
if self.fps < 30:
self.fail("Measured fps is below 30, {0}".format(self.fps))
self.log.debug("Measured fps is 30 or more, {0}".format(self.fps))
def on_fps_measurement(self, fpsdisplaysink, fps, droprate, avgfps):
self.fps = avgfps
def on_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
webcam.exit(self)
elif t == Gst.MessageType.ERROR:
webcam.exit(self)
self.error = message.parse_error()
|
<commit_before><commit_msg>Add initial framerate webcam test structure<commit_after>
|
#!/usr/bin/env python
import qrtools, gi, os
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk, Gst
from avocado import Test
from utils import webcam
class WebcamReadQR(Test):
"""
Uses the camera selected by v4l2src by default (/dev/video0) to get the
framerate by creating a pipeline with an fpsdisplaysink and initializing
Gtk main loop. For now, it is tested whether the framerate is 30 or more.
"""
def setUp(self):
self.error = None
#if not os.path.exists('/dev/video0'):
#self.skip("No webcam detected: /dev/video0 cannot be found");
def test(self):
elements = ['fpsdisplaysink video-sink=fakesink text-overlay=false '
'signal-fps-measurements=true']
webcam.create_video_pipeline(self, gst_elements=elements,
v4l2src_args="num-buffers=2000")
bus = self.video_player.get_bus()
bus.connect("fps-measurements", self.on_fps_measurement)
Gtk.main()
if self.error != None:
self.fail("Error: {0}".format(self.error))
if self.fps < 30:
self.fail("Measured fps is below 30, {0}".format(self.fps))
self.log.debug("Measured fps is 30 or more, {0}".format(self.fps))
def on_fps_measurement(self, fpsdisplaysink, fps, droprate, avgfps):
self.fps = avgfps
def on_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
webcam.exit(self)
elif t == Gst.MessageType.ERROR:
webcam.exit(self)
self.error = message.parse_error()
|
Add initial framerate webcam test structure#!/usr/bin/env python
import qrtools, gi, os
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk, Gst
from avocado import Test
from utils import webcam
class WebcamReadQR(Test):
"""
Uses the camera selected by v4l2src by default (/dev/video0) to get the
framerate by creating a pipeline with an fpsdisplaysink and initializing
Gtk main loop. For now, it is tested whether the framerate is 30 or more.
"""
def setUp(self):
self.error = None
#if not os.path.exists('/dev/video0'):
#self.skip("No webcam detected: /dev/video0 cannot be found");
def test(self):
elements = ['fpsdisplaysink video-sink=fakesink text-overlay=false '
'signal-fps-measurements=true']
webcam.create_video_pipeline(self, gst_elements=elements,
v4l2src_args="num-buffers=2000")
bus = self.video_player.get_bus()
bus.connect("fps-measurements", self.on_fps_measurement)
Gtk.main()
if self.error != None:
self.fail("Error: {0}".format(self.error))
if self.fps < 30:
self.fail("Measured fps is below 30, {0}".format(self.fps))
self.log.debug("Measured fps is 30 or more, {0}".format(self.fps))
def on_fps_measurement(self, fpsdisplaysink, fps, droprate, avgfps):
self.fps = avgfps
def on_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
webcam.exit(self)
elif t == Gst.MessageType.ERROR:
webcam.exit(self)
self.error = message.parse_error()
|
<commit_before><commit_msg>Add initial framerate webcam test structure<commit_after>#!/usr/bin/env python
import qrtools, gi, os
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk, Gst
from avocado import Test
from utils import webcam
class WebcamReadQR(Test):
"""
Uses the camera selected by v4l2src by default (/dev/video0) to get the
framerate by creating a pipeline with an fpsdisplaysink and initializing
Gtk main loop. For now, it is tested whether the framerate is 30 or more.
"""
def setUp(self):
self.error = None
#if not os.path.exists('/dev/video0'):
#self.skip("No webcam detected: /dev/video0 cannot be found");
def test(self):
elements = ['fpsdisplaysink video-sink=fakesink text-overlay=false '
'signal-fps-measurements=true']
webcam.create_video_pipeline(self, gst_elements=elements,
v4l2src_args="num-buffers=2000")
bus = self.video_player.get_bus()
bus.connect("fps-measurements", self.on_fps_measurement)
Gtk.main()
if self.error != None:
self.fail("Error: {0}".format(self.error))
if self.fps < 30:
self.fail("Measured fps is below 30, {0}".format(self.fps))
self.log.debug("Measured fps is 30 or more, {0}".format(self.fps))
def on_fps_measurement(self, fpsdisplaysink, fps, droprate, avgfps):
self.fps = avgfps
def on_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
webcam.exit(self)
elif t == Gst.MessageType.ERROR:
webcam.exit(self)
self.error = message.parse_error()
|
|
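The measurement idea in the test above can also be exercised without the Avocado harness or a camera. The sketch below is an illustration only: videotestsrc, the buffer count and the printed format are assumptions of mine, while fpsdisplaysink, its signal-fps-measurements property and its fps-measurements signal are standard GStreamer.
# Standalone fps-measurement sketch using GStreamer directly (illustrative only).
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)
pipeline = Gst.parse_launch(
    'videotestsrc num-buffers=300 ! fpsdisplaysink name=fpssink '
    'video-sink=fakesink text-overlay=false signal-fps-measurements=true')

def on_fps(element, fps, droprate, avg_fps):
    # Called periodically by fpsdisplaysink with current, dropped and average fps.
    print('current=%.1f dropped=%.1f average=%.1f' % (fps, droprate, avg_fps))

pipeline.get_by_name('fpssink').connect('fps-measurements', on_fps)
loop = GLib.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message::eos', lambda b, m: loop.quit())
pipeline.set_state(Gst.State.PLAYING)
loop.run()
pipeline.set_state(Gst.State.NULL)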
b522fed0a1ca2570b8652ddb64b8c847d5964d11
|
list_all_codes.py
|
list_all_codes.py
|
#!/usr/bin/env python
import lsi_decode_loginfo as loginfo
def generate_values(data):
title = data[0]
mask = data[1]
sub = data[2]
for key in sub.keys():
v = sub[key]
key_name = v[0]
key_sub = v[1]
key_detail = v[2]
if key_sub is None:
yield [(title, key, key_name, key_detail)]
else:
for sub_val in generate_values(key_sub):
yield [(title, key, key_name, key_detail)] + sub_val
for entry in generate_values(loginfo.types):
val = 0
for line in entry:
val |= line[1]
print ' %-10s\t0x%08X' % ('Value', val)
for line in entry:
print ' %-10s\t0x%08X %s %s' % (line[0], line[1], line[2], line[3])
print
print ' '
print
|
Add a script to generate all known codes and their decoding
|
Add a script to generate all known codes and their decoding
|
Python
|
mit
|
baruch/lsi_decode_loginfo
|
Add a script to generate all known codes and their decoding
|
#!/usr/bin/env python
import lsi_decode_loginfo as loginfo
def generate_values(data):
title = data[0]
mask = data[1]
sub = data[2]
for key in sub.keys():
v = sub[key]
key_name = v[0]
key_sub = v[1]
key_detail = v[2]
if key_sub is None:
yield [(title, key, key_name, key_detail)]
else:
for sub_val in generate_values(key_sub):
yield [(title, key, key_name, key_detail)] + sub_val
for entry in generate_values(loginfo.types):
val = 0
for line in entry:
val |= line[1]
print ' %-10s\t0x%08X' % ('Value', val)
for line in entry:
print ' %-10s\t0x%08X %s %s' % (line[0], line[1], line[2], line[3])
print
print ' '
print
|
<commit_before><commit_msg>Add a script to generate all known codes and their decoding<commit_after>
|
#!/usr/bin/env python
import lsi_decode_loginfo as loginfo
def generate_values(data):
title = data[0]
mask = data[1]
sub = data[2]
for key in sub.keys():
v = sub[key]
key_name = v[0]
key_sub = v[1]
key_detail = v[2]
if key_sub is None:
yield [(title, key, key_name, key_detail)]
else:
for sub_val in generate_values(key_sub):
yield [(title, key, key_name, key_detail)] + sub_val
for entry in generate_values(loginfo.types):
val = 0
for line in entry:
val |= line[1]
print ' %-10s\t0x%08X' % ('Value', val)
for line in entry:
print ' %-10s\t0x%08X %s %s' % (line[0], line[1], line[2], line[3])
print
print ' '
print
|
Add a script to generate all known codes and their decoding#!/usr/bin/env python
import lsi_decode_loginfo as loginfo
def generate_values(data):
title = data[0]
mask = data[1]
sub = data[2]
for key in sub.keys():
v = sub[key]
key_name = v[0]
key_sub = v[1]
key_detail = v[2]
if key_sub is None:
yield [(title, key, key_name, key_detail)]
else:
for sub_val in generate_values(key_sub):
yield [(title, key, key_name, key_detail)] + sub_val
for entry in generate_values(loginfo.types):
val = 0
for line in entry:
val |= line[1]
print ' %-10s\t0x%08X' % ('Value', val)
for line in entry:
print ' %-10s\t0x%08X %s %s' % (line[0], line[1], line[2], line[3])
print
print ' '
print
|
<commit_before><commit_msg>Add a script to generate all known codes and their decoding<commit_after>#!/usr/bin/env python
import lsi_decode_loginfo as loginfo
def generate_values(data):
title = data[0]
mask = data[1]
sub = data[2]
for key in sub.keys():
v = sub[key]
key_name = v[0]
key_sub = v[1]
key_detail = v[2]
if key_sub is None:
yield [(title, key, key_name, key_detail)]
else:
for sub_val in generate_values(key_sub):
yield [(title, key, key_name, key_detail)] + sub_val
for entry in generate_values(loginfo.types):
val = 0
for line in entry:
val |= line[1]
print ' %-10s\t0x%08X' % ('Value', val)
for line in entry:
print ' %-10s\t0x%08X %s %s' % (line[0], line[1], line[2], line[3])
print
print ' '
print
|
|
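The nested shape that generate_values() walks is only implicit above: each level is a (title, mask, sub_dict) tuple, and each sub_dict value is a (name, nested_level_or_None, detail) tuple. The mock below is invented purely to illustrate that shape; it is not the real lsi_decode_loginfo.types data.
# Mock of the structure generate_values() expects; values are invented for illustration.
mock_types = ('IOC Status', 0xFFFF0000, {
    0x00010000: ('Invalid Function', None, 'bad opcode'),
    0x00400000: ('LogInfo', ('Origin', 0x0F000000, {
        0x01000000: ('IOP', None, 'I/O processor'),
    }), 'see sub-codes'),
})

for combo in generate_values(mock_types):
    print(' | '.join('%s=0x%08X %s' % (title, code, name)
                     for (title, code, name, detail) in combo))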
985f23ee5e107c647d5f5e5b245c3fb7ff2d411b
|
bin/to_expected.py
|
bin/to_expected.py
|
#!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert result from PMF'
' to expected value')
parser.add_argument('file', type=str,
help='Result DataFrame in HDF5 format')
parser.add_argument('outfile', type=str,
help='Output file')
parser.add_argument('--hdf-key', type=str, default='df',
help='Identifier in the HDF5 store')
args = parser.parse_args()
df = pd.read_hdf(args.file, args.hdf_key)
data = np.array([])
grouped = df.groupby(level=df.columns.names[:4], axis=1)
columns = []
for name, _ in grouped:
columns.append(name)
pmf = df[name].values
supp = np.array(df[name].columns)
expected = np.sum(supp*pmf, axis=1)
data = np.concatenate((data, expected))
index = df.index.copy()
columns = pd.MultiIndex.from_tuples(columns)
df2 = pd.DataFrame(data.reshape((len(index), len(columns))), index=index,
columns=columns)
df2.to_hdf(args.outfile, args.hdf_key)
print("Stored in HDF5 format with the name '{}'".format(args.hdf_key))
|
Write script to convert PMF-based result to expected value
|
Write script to convert PMF-based result to expected value
|
Python
|
mit
|
kemskems/otdet
|
Write script to convert PMF-based result to expected value
|
#!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert result from PMF'
' to expected value')
parser.add_argument('file', type=str,
help='Result DataFrame in HDF5 format')
parser.add_argument('outfile', type=str,
help='Output file')
parser.add_argument('--hdf-key', type=str, default='df',
help='Identifier in the HDF5 store')
args = parser.parse_args()
df = pd.read_hdf(args.file, args.hdf_key)
data = np.array([])
grouped = df.groupby(level=df.columns.names[:4], axis=1)
columns = []
for name, _ in grouped:
columns.append(name)
pmf = df[name].values
supp = np.array(df[name].columns)
expected = np.sum(supp*pmf, axis=1)
data = np.concatenate((data, expected))
index = df.index.copy()
columns = pd.MultiIndex.from_tuples(columns)
df2 = pd.DataFrame(data.reshape((len(index), len(columns))), index=index,
columns=columns)
df2.to_hdf(args.outfile, args.hdf_key)
print("Stored in HDF5 format with the name '{}'".format(args.hdf_key))
|
<commit_before><commit_msg>Write script to convert PMF-based result to expected value<commit_after>
|
#!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert result from PMF'
' to expected value')
parser.add_argument('file', type=str,
help='Result DataFrame in HDF5 format')
parser.add_argument('outfile', type=str,
help='Output file')
parser.add_argument('--hdf-key', type=str, default='df',
help='Identifier in the HDF5 store')
args = parser.parse_args()
df = pd.read_hdf(args.file, args.hdf_key)
data = np.array([])
grouped = df.groupby(level=df.columns.names[:4], axis=1)
columns = []
for name, _ in grouped:
columns.append(name)
pmf = df[name].values
supp = np.array(df[name].columns)
expected = np.sum(supp*pmf, axis=1)
data = np.concatenate((data, expected))
index = df.index.copy()
columns = pd.MultiIndex.from_tuples(columns)
df2 = pd.DataFrame(data.reshape((len(index), len(columns))), index=index,
columns=columns)
df2.to_hdf(args.outfile, args.hdf_key)
print("Stored in HDF5 format with the name '{}'".format(args.hdf_key))
|
Write script to convert PMF-based result to expected value#!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert result from PMF'
' to expected value')
parser.add_argument('file', type=str,
help='Result DataFrame in HDF5 format')
parser.add_argument('outfile', type=str,
help='Output file')
parser.add_argument('--hdf-key', type=str, default='df',
help='Identifier in the HDF5 store')
args = parser.parse_args()
df = pd.read_hdf(args.file, args.hdf_key)
data = np.array([])
grouped = df.groupby(level=df.columns.names[:4], axis=1)
columns = []
for name, _ in grouped:
columns.append(name)
pmf = df[name].values
supp = np.array(df[name].columns)
expected = np.sum(supp*pmf, axis=1)
data = np.concatenate((data, expected))
index = df.index.copy()
columns = pd.MultiIndex.from_tuples(columns)
df2 = pd.DataFrame(data.reshape((len(index), len(columns))), index=index,
columns=columns)
df2.to_hdf(args.outfile, args.hdf_key)
print("Stored in HDF5 format with the name '{}'".format(args.hdf_key))
|
<commit_before><commit_msg>Write script to convert PMF-based result to expected value<commit_after>#!/usr/bin/env python
import argparse
import numpy as np
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert result from PMF'
' to expected value')
parser.add_argument('file', type=str,
help='Result DataFrame in HDF5 format')
parser.add_argument('outfile', type=str,
help='Output file')
parser.add_argument('--hdf-key', type=str, default='df',
help='Identifier in the HDF5 store')
args = parser.parse_args()
df = pd.read_hdf(args.file, args.hdf_key)
data = np.array([])
grouped = df.groupby(level=df.columns.names[:4], axis=1)
columns = []
for name, _ in grouped:
columns.append(name)
pmf = df[name].values
supp = np.array(df[name].columns)
expected = np.sum(supp*pmf, axis=1)
data = np.concatenate((data, expected))
index = df.index.copy()
columns = pd.MultiIndex.from_tuples(columns)
df2 = pd.DataFrame(data.reshape((len(index), len(columns))), index=index,
columns=columns)
df2.to_hdf(args.outfile, args.hdf_key)
print("Stored in HDF5 format with the name '{}'".format(args.hdf_key))
|
|
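The per-group reduction the script performs is simply the expected value of a discrete distribution, E[X] = sum of x * p(x) over the support. A toy example with made-up numbers, independent of the HDF5 plumbing:
# Toy illustration of the per-group reduction: expected value from a PMF.
import numpy as np

support = np.array([0, 1, 2, 3])          # the group's columns (PMF support)
pmf = np.array([[0.1, 0.2, 0.3, 0.4],     # one row per index entry
                [0.7, 0.1, 0.1, 0.1]])
expected = np.sum(support * pmf, axis=1)
print(expected)                           # [2.  0.6]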
3f6ec1a3e9bcdd2dee714e74fac7215b19ae432f
|
blocking_socket.py
|
blocking_socket.py
|
"""
A Simple example for testing the SimpleServer Class. A simple telnet server.
It is for studying purposes only.
"""
from server import SimpleServer
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def handle_message(sockets=None):
"""
Handle a simple TCP connection.
"""
if sockets is not None:
(readable, writable, errors) = sockets
try:
while True:
data = readable.recv(1024)
print('Received data: %s' % (data))
if data:
print('Sending a custom ACK to the client')
writable.sendall("Received ;)\n")
else:
print('Received empty data')
break
finally:
SS.close_connection()
SS = SimpleServer(blocking=True)
SS.register_handler(handle_message)
SS.bind_and_listeen("localhost", 7878)
|
Add an example of a blocking tcp server
|
Add an example of a blocking tcp server
The example implements a basic telnet server.
|
Python
|
mit
|
facundovictor/non-blocking-socket-samples
|
Add an example of a blocking tcp server
The example implements a basic telnet server.
|
"""
A Simple example for testing the SimpleServer Class. A simple telnet server.
It is for studying purposes only.
"""
from server import SimpleServer
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def handle_message(sockets=None):
"""
Handle a simple TCP connection.
"""
if sockets is not None:
(readable, writable, errors) = sockets
try:
while True:
data = readable.recv(1024)
print('Received data: %s' % (data))
if data:
print('Sending a custom ACK to the client')
writable.sendall("Received ;)\n")
else:
print('Received empty data')
break
finally:
SS.close_connection()
SS = SimpleServer(blocking=True)
SS.register_handler(handle_message)
SS.bind_and_listeen("localhost", 7878)
|
<commit_before><commit_msg>Add an example of a blocking tcp server
The example implements a basic telnet server.<commit_after>
|
"""
A Simple example for testing the SimpleServer Class. A simple telnet server.
It is for studying purposes only.
"""
from server import SimpleServer
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def handle_message(sockets=None):
"""
Handle a simple TCP connection.
"""
if sockets is not None:
(readable, writable, errors) = sockets
try:
while True:
data = readable.recv(1024)
print('Received data: %s' % (data))
if data:
print('Sending a custom ACK to the client')
writable.sendall("Received ;)\n")
else:
print('Received empty data')
break
finally:
SS.close_connection()
SS = SimpleServer(blocking=True)
SS.register_handler(handle_message)
SS.bind_and_listeen("localhost", 7878)
|
Add an example of a blocking tcp server
The example implements a basic telnet server."""
A Simple example for testing the SimpleServer Class. A simple telnet server.
It is for studying purposes only.
"""
from server import SimpleServer
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def handle_message(sockets=None):
"""
Handle a simple TCP connection.
"""
if sockets is not None:
(readable, writable, errors) = sockets
try:
while True:
data = readable.recv(1024)
print('Received data: %s' % (data))
if data:
print('Sending a custom ACK to the client')
writable.sendall("Received ;)\n")
else:
print('Received empty data')
break
finally:
SS.close_connection()
SS = SimpleServer(blocking=True)
SS.register_handler(handle_message)
SS.bind_and_listeen("localhost", 7878)
|
<commit_before><commit_msg>Add an example of a blocking tcp server
The example implements a basic telnet server.<commit_after>"""
A Simple example for testing the SimpleServer Class. A simple telnet server.
It is for studying purposes only.
"""
from server import SimpleServer
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
def handle_message(sockets=None):
"""
Handle a simple TCP connection.
"""
if sockets is not None:
(readable, writable, errors) = sockets
try:
while True:
data = readable.recv(1024)
print('Received data: %s' % (data))
if data:
print('Sending a custom ACK to the client')
writable.sendall("Received ;)\n")
else:
print('Received empty data')
break
finally:
SS.close_connection()
SS = SimpleServer(blocking=True)
SS.register_handler(handle_message)
SS.bind_and_listeen("localhost", 7878)
|
|
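The SimpleServer class comes from a server module that is not part of this commit, so the example above cannot run on its own. A hypothetical client for poking at the running server (besides plain telnet) might look like this:
# Hypothetical test client for the blocking server above (Python 3).
import socket

with socket.create_connection(('localhost', 7878)) as conn:
    conn.sendall(b'hello\n')
    print(conn.recv(1024).decode())   # expect: Received ;)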
cc3b29aaa2c0ffa3cde6b901bf4bdf3ce3fb4345
|
pybaseball/league_pitching_stats.py
|
pybaseball/league_pitching_stats.py
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_soup(start_dt, end_dt):
# both start and end dates must be specified
if((start_dt is None) or (end_dt is None)):
print('Error: a date range needs to be specified')
return None
url = "http://www.baseball-reference.com/leagues/daily.cgi?user_team=&bust_cache=&type=p&lastndays=7&dates=fromandto&fromandto={}.{}&level=mlb&franch=&stat=&stat_value=0".format(start_dt, end_dt)
s=requests.get(url).content
return BeautifulSoup(s, "html.parser")
def get_table(soup):
table = soup.find_all('table')[0]
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")][1:]
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
return data
def pitching_stats(start_dt=None, end_dt=None):
# retrieve html from baseball reference
soup = get_soup(start_dt, end_dt)
table = get_table(soup)
return table
|
Add code for pulling pitcher stats for specified date range
|
Add code for pulling pitcher stats for specified date range
|
Python
|
mit
|
jldbc/pybaseball
|
Add code for pulling pitcher stats for specified date range
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_soup(start_dt, end_dt):
# both start and end dates must be specified
if((start_dt is None) or (end_dt is None)):
print('Error: a date range needs to be specified')
return None
url = "http://www.baseball-reference.com/leagues/daily.cgi?user_team=&bust_cache=&type=p&lastndays=7&dates=fromandto&fromandto={}.{}&level=mlb&franch=&stat=&stat_value=0".format(start_dt, end_dt)
s=requests.get(url).content
return BeautifulSoup(s, "html.parser")
def get_table(soup):
table = soup.find_all('table')[0]
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")][1:]
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
return data
def pitching_stats(start_dt=None, end_dt=None):
# retrieve html from baseball reference
soup = get_soup(start_dt, end_dt)
table = get_table(soup)
return table
|
<commit_before><commit_msg>Add code for pulling pitcher stats for specified date range<commit_after>
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_soup(start_dt, end_dt):
# both start and end dates must be specified
if((start_dt is None) or (end_dt is None)):
print('Error: a date range needs to be specified')
return None
url = "http://www.baseball-reference.com/leagues/daily.cgi?user_team=&bust_cache=&type=p&lastndays=7&dates=fromandto&fromandto={}.{}&level=mlb&franch=&stat=&stat_value=0".format(start_dt, end_dt)
s=requests.get(url).content
return BeautifulSoup(s, "html.parser")
def get_table(soup):
table = soup.find_all('table')[0]
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")][1:]
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
return data
def pitching_stats(start_dt=None, end_dt=None):
# retrieve html from baseball reference
soup = get_soup(start_dt, end_dt)
table = get_table(soup)
return table
|
Add code for pulling pitcher stats for specified date rangeimport requests
import pandas as pd
from bs4 import BeautifulSoup
def get_soup(start_dt, end_dt):
# both start and end dates must be specified
if((start_dt is None) or (end_dt is None)):
print('Error: a date range needs to be specified')
return None
url = "http://www.baseball-reference.com/leagues/daily.cgi?user_team=&bust_cache=&type=p&lastndays=7&dates=fromandto&fromandto={}.{}&level=mlb&franch=&stat=&stat_value=0".format(start_dt, end_dt)
s=requests.get(url).content
return BeautifulSoup(s, "html.parser")
def get_table(soup):
table = soup.find_all('table')[0]
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")][1:]
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
return data
def pitching_stats(start_dt=None, end_dt=None):
# retrieve html from baseball reference
soup = get_soup(start_dt, end_dt)
table = get_table(soup)
return table
|
<commit_before><commit_msg>Add code for pulling pitcher stats for specified date range<commit_after>import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_soup(start_dt, end_dt):
# both start and end dates must be specified
if((start_dt is None) or (end_dt is None)):
print('Error: a date range needs to be specified')
return None
url = "http://www.baseball-reference.com/leagues/daily.cgi?user_team=&bust_cache=&type=p&lastndays=7&dates=fromandto&fromandto={}.{}&level=mlb&franch=&stat=&stat_value=0".format(start_dt, end_dt)
s=requests.get(url).content
return BeautifulSoup(s, "html.parser")
def get_table(soup):
table = soup.find_all('table')[0]
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")][1:]
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
return data
def pitching_stats(start_dt=None, end_dt=None):
# retrieve html from baseball reference
soup = get_soup(start_dt, end_dt)
table = get_table(soup)
return table
|
|
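A usage sketch with an invented date range; per get_table() above, the returned pandas DataFrame has the scraped header row promoted to column names:
# Illustrative call; the URL expects dates in YYYY-MM-DD form.
df = pitching_stats(start_dt='2017-06-01', end_dt='2017-06-07')
print(df.head())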
701f6a06b8405620905a67b47c5702c100a1447a
|
scripts/check_sorted.py
|
scripts/check_sorted.py
|
import sys
prev_val = 0
prev_val2 = 0
counter = 0
for line in sys.stdin:
parts = line.split()
curr_val = int(parts[0])
curr_val2 = int(parts[1])
val1 = int(parts[0])
val2 = int(parts[1])
if val1 > val2:
print >>sys.stderr, "Not triangular:", counter
sys.exit(1)
if curr_val < prev_val:
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
elif curr_val == prev_val:
if (curr_val2 < prev_val2):
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
prev_val = curr_val
prev_val2 = curr_val2
counter += 1
if counter % 1000000 == 0:
print "counter:", counter, prev_val, curr_val
|
Check to make sure the input file is sorted
|
Check to make sure the input file is sorted
|
Python
|
mit
|
hms-dbmi/clodius,hms-dbmi/clodius
|
Check to make sure the input file is sorted
|
import sys
prev_val = 0
prev_val2 = 0
counter = 0
for line in sys.stdin:
parts = line.split()
curr_val = int(parts[0])
curr_val2 = int(parts[1])
val1 = int(parts[0])
val2 = int(parts[1])
if val1 > val2:
print >>sys.stderr, "Not triangular:", counter
sys.exit(1)
if curr_val < prev_val:
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
elif curr_val == prev_val:
if (curr_val2 < prev_val2):
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
prev_val = curr_val
prev_val2 = curr_val2
counter += 1
if counter % 1000000 == 0:
print "counter:", counter, prev_val, curr_val
|
<commit_before><commit_msg>Check to make sure the input file is sorted<commit_after>
|
import sys
prev_val = 0
prev_val2 = 0
counter = 0
for line in sys.stdin:
parts = line.split()
curr_val = int(parts[0])
curr_val2 = int(parts[1])
val1 = int(parts[0])
val2 = int(parts[1])
if val1 > val2:
print >>sys.stderr, "Not triangular:", counter
sys.exit(1)
if curr_val < prev_val:
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
elif curr_val == prev_val:
if (curr_val2 < prev_val2):
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
prev_val = curr_val
prev_val2 = curr_val2
counter += 1
if counter % 1000000 == 0:
print "counter:", counter, prev_val, curr_val
|
Check to make sure the input file is sortedimport sys
prev_val = 0
prev_val2 = 0
counter = 0
for line in sys.stdin:
parts = line.split()
curr_val = int(parts[0])
curr_val2 = int(parts[1])
val1 = int(parts[0])
val2 = int(parts[1])
if val1 > val2:
print >>sys.stderr, "Not triangular:", counter
sys.exit(1)
if curr_val < prev_val:
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
elif curr_val == prev_val:
if (curr_val2 < prev_val2):
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
prev_val = curr_val
prev_val2 = curr_val2
counter += 1
if counter % 1000000 == 0:
print "counter:", counter, prev_val, curr_val
|
<commit_before><commit_msg>Check to make sure the input file is sorted<commit_after>import sys
prev_val = 0
prev_val2 = 0
counter = 0
for line in sys.stdin:
parts = line.split()
curr_val = int(parts[0])
curr_val2 = int(parts[1])
val1 = int(parts[0])
val2 = int(parts[1])
if val1 > val2:
print >>sys.stderr, "Not triangular:", counter
sys.exit(1)
if curr_val < prev_val:
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
elif curr_val == prev_val:
if (curr_val2 < prev_val2):
print >>sys.stderr, "Not sorted, line:", counter
sys.exit(1)
prev_val = curr_val
prev_val2 = curr_val2
counter += 1
if counter % 1000000 == 0:
print "counter:", counter, prev_val, curr_val
|
|
bdf5cfb2a7b716d897dabd62e591caad8144a029
|
utils/populate-funding.py
|
utils/populate-funding.py
|
#!/usr/bin/python
import os
import sys
import csv
from optparse import OptionParser
from django.core.management import setup_environ
my_path = os.path.abspath(os.path.dirname(__file__))
app_path = os.path.normpath(my_path + '/..')
app_base = app_path + '/'
# We need a path like '<app_path>/utils:<app_path>:<app_path>/..'
# The first one is inserted by python itself. The order is important to
# guarantee that we'll import the proper app specific module in case there
# is also a generic (non-app-specific) module with the same name later in
# the path.
sys.path.insert(1, app_path)
sys.path.insert(2, os.path.normpath(app_path + '/..'))
from kamu import settings
setup_environ(settings)
from django.db import connection, transaction
from django import db
from votes.models import Member, TermMember, Term, MemberStats
parser = OptionParser()
parser.add_option('--input', action='store', type='string', dest='input',
help='input file')
(opts, args) = parser.parse_args()
if not opts.input:
exit(1)
MEMBER_NAME_TRANSFORMS = {
'Korhonen Timo': 'Korhonen Timo V.',
'Ollila Heikki': 'Ollila Heikki A.',
'Saarela Tanja': 'Karpela Tanja',
'Kumpula Miapetra': 'Kumpula-Natri Miapetra',
'Forsius-Harkimo Merikukka': 'Forsius Merikukka',
}
TERM="2007-2010"
term = Term.objects.get(name=TERM)
f = open(opts.input, 'r')
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
first_name = row[1].strip()
last_name = row[0].strip()
budget = row[4].strip().replace(',', '')
name = "%s %s" % (last_name, first_name)
if name in MEMBER_NAME_TRANSFORMS:
name = MEMBER_NAME_TRANSFORMS[name]
print "%-20s %-20s %10s" % (first_name, last_name, budget)
try:
member = Member.objects.get(name=name)
tm = TermMember.objects.get(member=member, term=term)
except Member.DoesNotExist:
continue
except TermMember.DoesNotExist:
continue
ms = MemberStats.objects.get(begin=term.begin, end=term.end, member=member)
tm.election_budget = budget
tm.save()
ms.election_budget = budget
ms.save()
f.close()
|
Add election funding parsing script
|
Add election funding parsing script
|
Python
|
agpl-3.0
|
kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu
|
Add election funding parsing script
|
#!/usr/bin/python
import os
import sys
import csv
from optparse import OptionParser
from django.core.management import setup_environ
my_path = os.path.abspath(os.path.dirname(__file__))
app_path = os.path.normpath(my_path + '/..')
app_base = app_path + '/'
# We need a path like '<app_path>/utils:<app_path>:<app_path>/..'
# The first one is inserted by python itself. The order is important to
# guarantee that we'll import the proper app specific module in case there
# is also a generic (non-app-specific) module with the same name later in
# the path.
sys.path.insert(1, app_path)
sys.path.insert(2, os.path.normpath(app_path + '/..'))
from kamu import settings
setup_environ(settings)
from django.db import connection, transaction
from django import db
from votes.models import Member, TermMember, Term, MemberStats
parser = OptionParser()
parser.add_option('--input', action='store', type='string', dest='input',
help='input file')
(opts, args) = parser.parse_args()
if not opts.input:
exit(1)
MEMBER_NAME_TRANSFORMS = {
'Korhonen Timo': 'Korhonen Timo V.',
'Ollila Heikki': 'Ollila Heikki A.',
'Saarela Tanja': 'Karpela Tanja',
'Kumpula Miapetra': 'Kumpula-Natri Miapetra',
'Forsius-Harkimo Merikukka': 'Forsius Merikukka',
}
TERM="2007-2010"
term = Term.objects.get(name=TERM)
f = open(opts.input, 'r')
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
first_name = row[1].strip()
last_name = row[0].strip()
budget = row[4].strip().replace(',', '')
name = "%s %s" % (last_name, first_name)
if name in MEMBER_NAME_TRANSFORMS:
name = MEMBER_NAME_TRANSFORMS[name]
print "%-20s %-20s %10s" % (first_name, last_name, budget)
try:
member = Member.objects.get(name=name)
tm = TermMember.objects.get(member=member, term=term)
except Member.DoesNotExist:
continue
except TermMember.DoesNotExist:
continue
ms = MemberStats.objects.get(begin=term.begin, end=term.end, member=member)
tm.election_budget = budget
tm.save()
ms.election_budget = budget
ms.save()
f.close()
|
<commit_before><commit_msg>Add election funding parsing script<commit_after>
|
#!/usr/bin/python
import os
import sys
import csv
from optparse import OptionParser
from django.core.management import setup_environ
my_path = os.path.abspath(os.path.dirname(__file__))
app_path = os.path.normpath(my_path + '/..')
app_base = app_path + '/'
# We need a path like '<app_path>/utils:<app_path>:<app_path>/..'
# The first one is inserted by python itself. The order is important to
# guarantee that we'll import the proper app specific module in case there
# is also a generic (non-app-specific) module with the same name later in
# the path.
sys.path.insert(1, app_path)
sys.path.insert(2, os.path.normpath(app_path + '/..'))
from kamu import settings
setup_environ(settings)
from django.db import connection, transaction
from django import db
from votes.models import Member, TermMember, Term, MemberStats
parser = OptionParser()
parser.add_option('--input', action='store', type='string', dest='input',
help='input file')
(opts, args) = parser.parse_args()
if not opts.input:
exit(1)
MEMBER_NAME_TRANSFORMS = {
'Korhonen Timo': 'Korhonen Timo V.',
'Ollila Heikki': 'Ollila Heikki A.',
'Saarela Tanja': 'Karpela Tanja',
'Kumpula Miapetra': 'Kumpula-Natri Miapetra',
'Forsius-Harkimo Merikukka': 'Forsius Merikukka',
}
TERM="2007-2010"
term = Term.objects.get(name=TERM)
f = open(opts.input, 'r')
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
first_name = row[1].strip()
last_name = row[0].strip()
budget = row[4].strip().replace(',', '')
name = "%s %s" % (last_name, first_name)
if name in MEMBER_NAME_TRANSFORMS:
name = MEMBER_NAME_TRANSFORMS[name]
print "%-20s %-20s %10s" % (first_name, last_name, budget)
try:
member = Member.objects.get(name=name)
tm = TermMember.objects.get(member=member, term=term)
except Member.DoesNotExist:
continue
except TermMember.DoesNotExist:
continue
ms = MemberStats.objects.get(begin=term.begin, end=term.end, member=member)
tm.election_budget = budget
tm.save()
ms.election_budget = budget
ms.save()
f.close()
|
Add election funding parsing script#!/usr/bin/python
import os
import sys
import csv
from optparse import OptionParser
from django.core.management import setup_environ
my_path = os.path.abspath(os.path.dirname(__file__))
app_path = os.path.normpath(my_path + '/..')
app_base = app_path + '/'
# We need a path like '<app_path>/utils:<app_path>:<app_path>/..'
# The first one is inserted by python itself. The order is important to
# guarantee that we'll import the proper app specific module in case there
# is also a generic (non-app-specific) module with the same name later in
# the path.
sys.path.insert(1, app_path)
sys.path.insert(2, os.path.normpath(app_path + '/..'))
from kamu import settings
setup_environ(settings)
from django.db import connection, transaction
from django import db
from votes.models import Member, TermMember, Term, MemberStats
parser = OptionParser()
parser.add_option('--input', action='store', type='string', dest='input',
help='input file')
(opts, args) = parser.parse_args()
if not opts.input:
exit(1)
MEMBER_NAME_TRANSFORMS = {
'Korhonen Timo': 'Korhonen Timo V.',
'Ollila Heikki': 'Ollila Heikki A.',
'Saarela Tanja': 'Karpela Tanja',
'Kumpula Miapetra': 'Kumpula-Natri Miapetra',
'Forsius-Harkimo Merikukka': 'Forsius Merikukka',
}
TERM="2007-2010"
term = Term.objects.get(name=TERM)
f = open(opts.input, 'r')
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
first_name = row[1].strip()
last_name = row[0].strip()
budget = row[4].strip().replace(',', '')
name = "%s %s" % (last_name, first_name)
if name in MEMBER_NAME_TRANSFORMS:
name = MEMBER_NAME_TRANSFORMS[name]
print "%-20s %-20s %10s" % (first_name, last_name, budget)
try:
member = Member.objects.get(name=name)
tm = TermMember.objects.get(member=member, term=term)
except Member.DoesNotExist:
continue
except TermMember.DoesNotExist:
continue
ms = MemberStats.objects.get(begin=term.begin, end=term.end, member=member)
tm.election_budget = budget
tm.save()
ms.election_budget = budget
ms.save()
f.close()
|
<commit_before><commit_msg>Add election funding parsing script<commit_after>#!/usr/bin/python
import os
import sys
import csv
from optparse import OptionParser
from django.core.management import setup_environ
my_path = os.path.abspath(os.path.dirname(__file__))
app_path = os.path.normpath(my_path + '/..')
app_base = app_path + '/'
# We need a path like '<app_path>/utils:<app_path>:<app_path>/..'
# The first one is inserted by python itself. The order is important to
# guarantee that we'll import the proper app specific module in case there
# is also a generic (non-app-specific) module with the same name later in
# the path.
sys.path.insert(1, app_path)
sys.path.insert(2, os.path.normpath(app_path + '/..'))
from kamu import settings
setup_environ(settings)
from django.db import connection, transaction
from django import db
from votes.models import Member, TermMember, Term, MemberStats
parser = OptionParser()
parser.add_option('--input', action='store', type='string', dest='input',
help='input file')
(opts, args) = parser.parse_args()
if not opts.input:
exit(1)
MEMBER_NAME_TRANSFORMS = {
'Korhonen Timo': 'Korhonen Timo V.',
'Ollila Heikki': 'Ollila Heikki A.',
'Saarela Tanja': 'Karpela Tanja',
'Kumpula Miapetra': 'Kumpula-Natri Miapetra',
'Forsius-Harkimo Merikukka': 'Forsius Merikukka',
}
TERM="2007-2010"
term = Term.objects.get(name=TERM)
f = open(opts.input, 'r')
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
first_name = row[1].strip()
last_name = row[0].strip()
budget = row[4].strip().replace(',', '')
name = "%s %s" % (last_name, first_name)
if name in MEMBER_NAME_TRANSFORMS:
name = MEMBER_NAME_TRANSFORMS[name]
print "%-20s %-20s %10s" % (first_name, last_name, budget)
try:
member = Member.objects.get(name=name)
tm = TermMember.objects.get(member=member, term=term)
except Member.DoesNotExist:
continue
except TermMember.DoesNotExist:
continue
ms = MemberStats.objects.get(begin=term.begin, end=term.end, member=member)
tm.election_budget = budget
tm.save()
ms.election_budget = budget
ms.save()
f.close()
|
|
03de607d14805779ed9653b65a5bd5cee3525903
|
server/ifttt_on_campaign_success.py
|
server/ifttt_on_campaign_success.py
|
import collections
import king_phisher.plugins as plugin_opts
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import requests
class Plugin(plugins.ServerPlugin):
authors = ['Spencer McIntyre']
title = 'IFTTT Campaign Success Notification'
description = """
A plugin that will publish an event to a specified IFTTT Maker channel when
a campaign has been deemed 'successful'.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = [
plugin_opts.OptionString(
name='api_key',
description='Maker channel API key'
),
plugin_opts.OptionString(
name='event_name',
description='Maker channel Event name'
)
]
def initialize(self):
signals.db_session_inserted.connect(self.on_kp_db_event, sender='visits')
return True
def on_kp_db_event(self, sender, targets, session):
campaign_ids = collections.deque()
for event in targets:
cid = event.campaign_id
if cid in campaign_ids:
continue
if not self.check_campaign(session, cid):
continue
campaign_ids.append(cid)
self.send_notification()
def check_campaign(self, session, cid):
campaign = db_manager.get_row_by_id(session, db_models.Campaign, cid)
if campaign.has_expired:
# the campaign must not have expired
return False
unique_targets = session.query(models.Message.target_email)
unique_targets = unique_targets.filter_by(campaign_id=cid)
unique_targets = float(unique_targets.distinct().count())
if unique_targets < 5:
# the campaign needs at least 5 unique targets
return False
success_percentage = 0.25
unique_visits = session.query(models.Visit.message_id)
unique_visits = unique_visits.filter_by(campaign_id=cid)
unique_visits = float(unique_visits.distinct().count())
if unique_visits / unique_targets < success_percentage:
# the campaign is not yet classified as successful
return False
if (unique_visits - 1) / unique_targets >= success_percentage:
# the campaign has already been classified as successful
return False
return True
def send_notification(self):
try:
resp = requests.post("https://maker.ifttt.com/trigger/{0}/with/key/{1}".format(self.config['event_name'], self.config['api_key']))
except Exception as error:
self.logger.error('failed to post a notification of a successful campaign (exception)', exc_info=True)
return
if not resp.ok:
self.logger.error('failed to post a notification of a successful campaign (request)')
return
self.logger.info('successfully posted notification of a successful campaign')
|
Add the IFTTT campaign success server plugin
|
Add the IFTTT campaign success server plugin
|
Python
|
bsd-3-clause
|
securestate/king-phisher-plugins,zeroSteiner/king-phisher-plugins,securestate/king-phisher-plugins,wolfthefallen/king-phisher-plugins,zeroSteiner/king-phisher-plugins,wolfthefallen/king-phisher-plugins
|
Add the IFTTT campaign success server plugin
|
import collections
import king_phisher.plugins as plugin_opts
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import requests
class Plugin(plugins.ServerPlugin):
authors = ['Spencer McIntyre']
title = 'IFTTT Campaign Success Notification'
description = """
A plugin that will publish an event to a specified IFTTT Maker channel when
a campaign has been deemed 'successful'.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = [
plugin_opts.OptionString(
name='api_key',
description='Maker channel API key'
),
plugin_opts.OptionString(
name='event_name',
description='Maker channel Event name'
)
]
def initialize(self):
signals.db_session_inserted.connect(self.on_kp_db_event, sender='visits')
return True
def on_kp_db_event(self, sender, targets, session):
campaign_ids = collections.deque()
for event in targets:
cid = event.campaign_id
if cid in campaign_ids:
continue
if not self.check_campaign(session, cid):
continue
campaign_ids.append(cid)
self.send_notification()
def check_campaign(self, session, cid):
campaign = db_manager.get_row_by_id(session, db_models.Campaign, cid)
if campaign.has_expired:
# the campaign must not have expired
return False
unique_targets = session.query(models.Message.target_email)
unique_targets = unique_targets.filter_by(campaign_id=cid)
unique_targets = float(unique_targets.distinct().count())
if unique_targets < 5:
# the campaign needs at least 5 unique targets
return False
success_percentage = 0.25
unique_visits = session.query(models.Visit.message_id)
unique_visits = unique_visits.filter_by(campaign_id=cid)
unique_visits = float(unique_visits.distinct().count())
if unique_visits / unique_targets < success_percentage:
# the campaign is not yet classified as successful
return False
if (unique_visits - 1) / unique_targets >= success_percentage:
# the campaign has already been classified as successful
return False
return True
def send_notification(self):
try:
resp = requests.post("https://maker.ifttt.com/trigger/{0}/with/key/{1}".format(self.config['event_name'], self.config['api_key']))
except Exception as error:
self.logger.error('failed to post a notification of a successful campaign (exception)', exc_info=True)
return
if not resp.ok:
self.logger.error('failed to post a notification of a successful campaign (request)')
return
self.logger.info('successfully posted notification of a successful campaign')
|
<commit_before><commit_msg>Add the IFTTT campaign success server plugin<commit_after>
|
import collections
import king_phisher.plugins as plugin_opts
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import requests
class Plugin(plugins.ServerPlugin):
authors = ['Spencer McIntyre']
title = 'IFTTT Campaign Success Notification'
description = """
A plugin that will publish an event to a specified IFTTT Maker channel when
a campaign has been deemed 'successful'.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = [
plugin_opts.OptionString(
name='api_key',
description='Maker channel API key'
),
plugin_opts.OptionString(
name='event_name',
description='Maker channel Event name'
)
]
def initialize(self):
signals.db_session_inserted.connect(self.on_kp_db_event, sender='visits')
return True
def on_kp_db_event(self, sender, targets, session):
campaign_ids = collections.deque()
for event in targets:
cid = event.campaign_id
if cid in campaign_ids:
continue
if not self.check_campaign(session, cid):
continue
campaign_ids.append(cid)
self.send_notification()
def check_campaign(self, session, cid):
campaign = db_manager.get_row_by_id(session, db_models.Campaign, cid)
if campaign.has_expired:
# the campaign must not have expired
return False
unique_targets = session.query(models.Message.target_email)
unique_targets = unique_targets.filter_by(campaign_id=cid)
unique_targets = float(unique_targets.distinct().count())
if unique_targets < 5:
# the campaign needs at least 5 unique targets
return False
success_percentage = 0.25
unique_visits = session.query(models.Visit.message_id)
unique_visits = unique_visits.filter_by(campaign_id=cid)
unique_visits = float(unique_visits.distinct().count())
if unique_visits / unique_targets < success_percentage:
# the campaign is not yet classified as successful
return False
if (unique_visits - 1) / unique_targets >= success_percentage:
# the campaign has already been classified as successful
return False
return True
def send_notification(self):
try:
resp = requests.post("https://maker.ifttt.com/trigger/{0}/with/key/{1}".format(self.config['event_name'], self.config['api_key']))
except Exception as error:
self.logger.error('failed to post a notification of a successful campaign (exception)', exc_info=True)
return
if not resp.ok:
self.logger.error('failed to post a notification of a successful campaign (request)')
return
self.logger.info('successfully posted notification of a successful campaign')
|
Add the IFTTT campaign success server pluginimport collections
import king_phisher.plugins as plugin_opts
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import requests
class Plugin(plugins.ServerPlugin):
authors = ['Spencer McIntyre']
title = 'IFTTT Campaign Success Notification'
description = """
A plugin that will publish an event to a specified IFTTT Maker channel when
a campaign has been deemed 'successful'.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = [
plugin_opts.OptionString(
name='api_key',
description='Maker channel API key'
),
plugin_opts.OptionString(
name='event_name',
description='Maker channel Event name'
)
]
def initialize(self):
signals.db_session_inserted.connect(self.on_kp_db_event, sender='visits')
return True
def on_kp_db_event(self, sender, targets, session):
campaign_ids = collections.deque()
for event in targets:
cid = event.campaign_id
if cid in campaign_ids:
continue
if not self.check_campaign(session, cid):
continue
campaign_ids.append(cid)
self.send_notification()
def check_campaign(self, session, cid):
campaign = db_manager.get_row_by_id(session, db_models.Campaign, cid)
if campaign.has_expired:
# the campaign must not have expired
return False
unique_targets = session.query(models.Message.target_email)
unique_targets = unique_targets.filter_by(campaign_id=cid)
unique_targets = float(unique_targets.distinct().count())
if unique_targets < 5:
# the campaign needs at least 5 unique targets
return False
success_percentage = 0.25
unique_visits = session.query(models.Visit.message_id)
unique_visits = unique_visits.filter_by(campaign_id=cid)
unique_visits = float(unique_visits.distinct().count())
if unique_visits / unique_targets < success_percentage:
# the campaign is not yet classified as successful
return False
if (unique_visits - 1) / unique_targets >= success_percentage:
# the campaign has already been classified as successful
return False
return True
def send_notification(self):
try:
resp = requests.post("https://maker.ifttt.com/trigger/{0}/with/key/{1}".format(self.config['event_name'], self.config['api_key']))
except Exception as error:
self.logger.error('failed to post a notification of a successful campaign (exception)', exc_info=True)
return
if not resp.ok:
self.logger.error('failed to post a notification of a successful campaign (request)')
return
self.logger.info('successfully posted notification of a successful campaign')
|
<commit_before><commit_msg>Add the IFTTT campaign success server plugin<commit_after>import collections
import king_phisher.plugins as plugin_opts
import king_phisher.server.plugins as plugins
import king_phisher.server.signals as signals
import requests
class Plugin(plugins.ServerPlugin):
authors = ['Spencer McIntyre']
title = 'IFTTT Campaign Success Notification'
description = """
A plugin that will publish an event to a specified IFTTT Maker channel when
a campaign has been deemed 'successful'.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = [
plugin_opts.OptionString(
name='api_key',
description='Maker channel API key'
),
plugin_opts.OptionString(
name='event_name',
description='Maker channel Event name'
)
]
def initialize(self):
signals.db_session_inserted.connect(self.on_kp_db_event, sender='visits')
return True
def on_kp_db_event(self, sender, targets, session):
campaign_ids = collections.deque()
for event in targets:
cid = event.campaign_id
if cid in campaign_ids:
continue
if not self.check_campaign(session, cid):
continue
campaign_ids.append(cid)
self.send_notification()
def check_campaign(self, session, cid):
campaign = db_manager.get_row_by_id(session, db_models.Campaign, cid)
if campaign.has_expired:
# the campaign must not have expired
return False
unique_targets = session.query(models.Message.target_email)
unique_targets = unique_targets.filter_by(campaign_id=cid)
unique_targets = float(unique_targets.distinct().count())
if unique_targets < 5:
# the campaign needs at least 5 unique targets
return False
success_percentage = 0.25
unique_visits = session.query(models.Visit.message_id)
unique_visits = unique_visits.filter_by(campaign_id=cid)
unique_visits = float(unique_visits.distinct().count())
if unique_visits / unique_targets < success_percentage:
# the campaign is not yet classified as successful
return False
if (unique_visits - 1) / unique_targets >= success_percentage:
# the campaign has already been classified as successful
return False
return True
def send_notification(self):
try:
resp = requests.post("https://maker.ifttt.com/trigger/{0}/with/key/{1}".format(self.config['event_name'], self.config['api_key']))
except Exception as error:
self.logger.error('failed to post a notification of a successful campaign (exception)', exc_info=True)
return
if not resp.ok:
self.logger.error('failed to post a notification of a successful campaign (request)')
return
self.logger.info('successfully posted notification of a successful campaign')
|
|
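Outside King Phisher (note the plugin body also references db_manager, db_models and models helpers that this snapshot never imports), the Maker-channel call reduces to one POST. A standalone sketch with placeholder event name and key; the value1 JSON field is an optional IFTTT webhook feature the plugin does not use:
# Standalone sketch of the IFTTT Maker webhook call; EVENT and KEY are placeholders.
import requests

EVENT = 'campaign_success'        # hypothetical Maker event name
KEY = 'your-maker-channel-key'    # hypothetical API key
url = 'https://maker.ifttt.com/trigger/{0}/with/key/{1}'.format(EVENT, KEY)
resp = requests.post(url, json={'value1': 'example campaign'}, timeout=10)
resp.raise_for_status()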
b647416b719c9f0b2534c13a67d3396fefaada47
|
p001_multiples_of_3_and_5.py
|
p001_multiples_of_3_and_5.py
|
#
'''
Project Euler - Problem 1 - Multiples of 3 and 5
https://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
import sys
def main():
'''Sum the numbers from 1 through 999 that are multiples of either 3 or 5.
'''
# get list of numbers using list comprehension
numbers = [x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0]
count = len(numbers)
total = sum(numbers)
# display length of list and the sum of its elements
print("There are {0} numbers from 1 through 999 that are multiples of either"
"3 or 5. Their sum is: {1}".format(count, total))
# One line alternative solution
# sum the output of a generator whose elements are from 1 to 999 and provided
# they are a multiple of 3 or 5 using modulo arithmetic. No intermediate list
# is constructed.
total = sum(x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0)
print("Alternative: Sum of numbers 1 through 999 that are multiples of either"
" 3 or 5: {0}".format(total))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
Add problem 1 sum multiples of 3 or 5 python solution
|
Add problem 1 sum multiples of 3 or 5 python solution
|
Python
|
mit
|
ChrisFreeman/project-euler
|
Add problem 1 sum multiples of 3 or 5 python solution
|
#
'''
Project Euler - Problem 1 - Multiples of 3 and 5
https://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
import sys
def main():
'''Sum the numbers from 1 through 999 that are multiples of either 3 or 5.
'''
# get list of numbers using list comprehension
numbers = [x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0]
count = len(numbers)
total = sum(numbers)
# display length of list and the sum of its elements
print("There are {0} numbers from 1 through 999 that are multiples of either"
"3 or 5. Their sum is: {1}".format(count, total))
# One line alternative solution
# sum the output of a generator whose elements are from 1 to 999 and provided
# they are a multiple of 3 or 5 using modulo arithmetic. No intermediate list
# is constructed.
total = sum(x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0)
print("Alternative: Sum of numbers 1 through 999 that are multiples of either"
" 3 or 5: {0}".format(total))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add problem 1 sum multiples of 3 or 5 python solution<commit_after>
|
#
'''
Project Euler - Problem 1 - Multiples of 3 and 5
https://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
import sys
def main():
'''Sum the numbers from 1 through 999 that are multiples of either 3 or 5.
'''
# get list of numbers using list comprehension
numbers = [x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0]
count = len(numbers)
total = sum(numbers)
# display length of list and the sum of its elements
print("There are {0} numbers from 1 through 999 that are multiples of either"
"3 or 5. Their sum is: {1}".format(count, total))
# One line alternative solution
# sum the output of a generator whose elements are from 1 to 999 and provided
# they are a multiple of 3 or 5 using modulo arithmetic. No intermediate list
# is constructed.
total = sum(x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0)
print("Alternative: Sum of numbers 1 through 999 that are multiples of either"
" 3 or 5: {0}".format(total))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
Add problem 1 sum multiples of 3 or 5 python solution#
'''
Project Euler - Problem 1 - Multiples of 3 and 5
https://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
import sys
def main():
'''Sum the numbers from 1 through 999 that are multiples of either 3 or 5.
'''
# get list of numbers using list comprehension
numbers = [x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0]
count = len(numbers)
total = sum(numbers)
# display length of list and the sum of its elements
print("There are {0} numbers from 1 through 999 that are multiples of either"
"3 or 5. Their sum is: {1}".format(count, total))
# One line alternative solution
# sum the output of a generator whose elements are from 1 to 999 and provided
# they are a multiple of 3 or 5 using modulo arithmetic. No intermediate list
# is constructed.
total = sum(x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0)
print("Alternative: Sum of numbers 1 through 999 that are multiples of either"
" 3 or 5: {0}".format(total))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add problem 1 sum multiples of 3 or 5 python solution<commit_after>#
'''
Project Euler - Problem 1 - Multiples of 3 and 5
https://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
import sys
def main():
'''Sum the numbers from 1 through 999 that are multiples of either 3 or 5.
'''
# get list of numbers using list comprehension
numbers = [x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0]
count = len(numbers)
total = sum(numbers)
# display length of list and the sum of its elements
print("There are {0} numbers from 1 through 999 that are multiples of either"
"3 or 5. Their sum is: {1}".format(count, total))
# One line alternative solution
# sum the output of a generator whose elements are from 1 to 999 and provided
# they are a multiple of 3 or 5 using modulo arithmetic. No intermediate list
# is constructed.
total = sum(x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0)
print("Alternative: Sum of numbers 1 through 999 that are multiples of either"
" 3 or 5: {0}".format(total))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
|
0c2f07fabb94698b8cf1b42a4f671ad0cd5e365f
|
src/ggrc/migrations/versions/20160321011353_3914dbf78dc1_add_comment_notification_type.py
|
src/ggrc/migrations/versions/20160321011353_3914dbf78dc1_add_comment_notification_type.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add comment notification type
Create Date: 2016-03-21 01:13:53.293580
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from alembic import op
# revision identifiers, used by Alembic.
revision = '3914dbf78dc1'
down_revision = '50c374901d42'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
NOTIFICATIONS = [{
"name": "comment_created",
"description": "Notify selected users that a comment has been created",
"template": "comment_created",
"advance_notice": 0,
"instant": False,
}]
def upgrade():
"""Add notification type entries for requests and assessments."""
op.bulk_insert(NOTIFICATION_TYPES, NOTIFICATIONS)
def downgrade():
"""Remove notification type entries for requests and assessments."""
notification_names = [notif["name"] for notif in NOTIFICATIONS]
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_(notification_names)
)
)
|
Add migration for comment notification type
|
Add migration for comment notification type
|
Python
|
apache-2.0
|
edofic/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core
|
Add migration for comment notification type
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add comment notification type
Create Date: 2016-03-21 01:13:53.293580
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from alembic import op
# revision identifiers, used by Alembic.
revision = '3914dbf78dc1'
down_revision = '50c374901d42'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
NOTIFICATIONS = [{
"name": "comment_created",
"description": "Notify selected users that a comment has been created",
"template": "comment_created",
"advance_notice": 0,
"instant": False,
}]
def upgrade():
"""Add notification type entries for requests and assessments."""
op.bulk_insert(NOTIFICATION_TYPES, NOTIFICATIONS)
def downgrade():
"""Remove notification type entries for requests and assessments."""
notification_names = [notif["name"] for notif in NOTIFICATIONS]
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_(notification_names)
)
)
|
<commit_before><commit_msg>Add migration for comment notification type<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add comment notification type
Create Date: 2016-03-21 01:13:53.293580
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from alembic import op
# revision identifiers, used by Alembic.
revision = '3914dbf78dc1'
down_revision = '50c374901d42'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
NOTIFICATIONS = [{
"name": "comment_created",
"description": "Notify selected users that a comment has been created",
"template": "comment_created",
"advance_notice": 0,
"instant": False,
}]
def upgrade():
"""Add notification type entries for requests and assessments."""
op.bulk_insert(NOTIFICATION_TYPES, NOTIFICATIONS)
def downgrade():
"""Remove notification type entries for requests and assessments."""
notification_names = [notif["name"] for notif in NOTIFICATIONS]
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_(notification_names)
)
)
|
Add migration for comment notification type# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add comment notification type
Create Date: 2016-03-21 01:13:53.293580
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from alembic import op
# revision identifiers, used by Alembic.
revision = '3914dbf78dc1'
down_revision = '50c374901d42'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
NOTIFICATIONS = [{
"name": "comment_created",
"description": "Notify selected users that a comment has been created",
"template": "comment_created",
"advance_notice": 0,
"instant": False,
}]
def upgrade():
"""Add notification type entries for requests and assessments."""
op.bulk_insert(NOTIFICATION_TYPES, NOTIFICATIONS)
def downgrade():
"""Remove notification type entries for requests and assessments."""
notification_names = [notif["name"] for notif in NOTIFICATIONS]
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_(notification_names)
)
)
|
<commit_before><commit_msg>Add migration for comment notification type<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add comment notification type
Create Date: 2016-03-21 01:13:53.293580
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from alembic import op
# revision identifiers, used by Alembic.
revision = '3914dbf78dc1'
down_revision = '50c374901d42'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
NOTIFICATIONS = [{
"name": "comment_created",
"description": "Notify selected users that a comment has been created",
"template": "comment_created",
"advance_notice": 0,
"instant": False,
}]
def upgrade():
"""Add notification type entries for requests and assessments."""
op.bulk_insert(NOTIFICATION_TYPES, NOTIFICATIONS)
def downgrade():
"""Remove notification type entries for requests and assessments."""
notification_names = [notif["name"] for notif in NOTIFICATIONS]
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_(notification_names)
)
)
|
|
6f7ed6f3b082c7f6399ab456a6f6b291219c910f
|
product_uom_prices/migrations/8.0.0.5.0/pre-migration.py
|
product_uom_prices/migrations/8.0.0.5.0/pre-migration.py
|
# -*- encoding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
def set_value(cr, model, table, field, value, condition):
print 'Set value %s on field %s on table %s' % (
value, field, table)
cr.execute('SELECT id '
'FROM %(table)s '
'%(condition)s' % {
'table': table,
'condition': condition,
})
for row in cr.fetchall():
model.write(cr, SUPERUSER_ID, row[0], {field: value})
def migrate(cr, version):
print 'Migrating product_uom_prices'
if not version:
return
registry = RegistryManager.get(cr.dbname)
model = 'product.template'
table = 'product_template'
field = "list_price_type"
value = "by_uom"
condition = "WHERE use_uom_prices"
set_value(
cr,
registry[model],
table,
field,
value,
condition,
)
|
ADD migration scripts for uom prices
|
ADD migration scripts for uom prices
|
Python
|
agpl-3.0
|
ingadhoc/product,ingadhoc/product
|
ADD migration scripts for uom prices
|
# -*- encoding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
def set_value(cr, model, table, field, value, condition):
print 'Set value %s on field %s on table %s' % (
value, field, table)
cr.execute('SELECT id '
'FROM %(table)s '
'%(condition)s' % {
'table': table,
'condition': condition,
})
for row in cr.fetchall():
model.write(cr, SUPERUSER_ID, row[0], {field: value})
def migrate(cr, version):
print 'Migrating product_uom_prices'
if not version:
return
registry = RegistryManager.get(cr.dbname)
model = 'product.template'
table = 'product_template'
field = "list_price_type"
value = "by_uom"
condition = "WHERE use_uom_prices"
set_value(
cr,
registry[model],
table,
field,
value,
condition,
)
|
<commit_before><commit_msg>ADD migration scripts for uom prices<commit_after>
|
# -*- encoding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
def set_value(cr, model, table, field, value, condition):
print 'Set value %s on field %s on table %s' % (
value, field, table)
cr.execute('SELECT id '
'FROM %(table)s '
'%(condition)s' % {
'table': table,
'condition': condition,
})
for row in cr.fetchall():
model.write(cr, SUPERUSER_ID, row[0], {field: value})
def migrate(cr, version):
print 'Migrating product_uom_prices'
if not version:
return
registry = RegistryManager.get(cr.dbname)
model = 'product.template'
table = 'product_template'
field = "list_price_type"
value = "by_uom"
condition = "WHERE use_uom_prices"
set_value(
cr,
registry[model],
table,
field,
value,
condition,
)
|
ADD migration scripts for uom prices# -*- encoding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
def set_value(cr, model, table, field, value, condition):
print 'Set value %s on field %s on table %s' % (
value, field, table)
cr.execute('SELECT id '
'FROM %(table)s '
'%(condition)s' % {
'table': table,
'condition': condition,
})
for row in cr.fetchall():
model.write(cr, SUPERUSER_ID, row[0], {field: value})
def migrate(cr, version):
print 'Migrating product_uom_prices'
if not version:
return
registry = RegistryManager.get(cr.dbname)
model = 'product.template'
table = 'product_template'
field = "list_price_type"
value = "by_uom"
condition = "WHERE use_uom_prices"
set_value(
cr,
registry[model],
table,
field,
value,
condition,
)
|
<commit_before><commit_msg>ADD migration scripts for uom prices<commit_after># -*- encoding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
def set_value(cr, model, table, field, value, condition):
print 'Set value %s on field %s on table %s' % (
value, field, table)
cr.execute('SELECT id '
'FROM %(table)s '
'%(condition)s' % {
'table': table,
'condition': condition,
})
for row in cr.fetchall():
model.write(cr, SUPERUSER_ID, row[0], {field: value})
def migrate(cr, version):
print 'Migrating product_uom_prices'
if not version:
return
registry = RegistryManager.get(cr.dbname)
model = 'product.template'
table = 'product_template'
field = "list_price_type"
value = "by_uom"
condition = "WHERE use_uom_prices"
set_value(
cr,
registry[model],
table,
field,
value,
condition,
)
|
|
121a80669b4b50665a7baafd3434cb3e574087f4
|
bluebottle/bb_projects/migrations/0004_adjust_phases.py
|
bluebottle/bb_projects/migrations/0004_adjust_phases.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=False)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=False)
def backwards(self, orm):
"Write your backwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=True)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=True)
models = {
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['bb_projects']
symmetrical = True
|
Adjust phases to make campaign non-editable
|
Adjust phases to make campaign non-editable
|
Python
|
bsd-3-clause
|
jfterpstra/bluebottle,jfterpstra/bluebottle,onepercentclub/bluebottle,jfterpstra/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,jfterpstra/bluebottle,onepercentclub/bluebottle
|
Adjust phases to make campaign non-editable
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=False)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=False)
def backwards(self, orm):
"Write your backwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=True)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=True)
models = {
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['bb_projects']
symmetrical = True
|
<commit_before><commit_msg>Adjust phases to make campaign non-editable<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=False)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=False)
def backwards(self, orm):
"Write your backwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=True)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=True)
models = {
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['bb_projects']
symmetrical = True
|
Adjust phases to make campaign non-editable# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=False)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=False)
def backwards(self, orm):
"Write your backwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=True)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=True)
models = {
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['bb_projects']
symmetrical = True
|
<commit_before><commit_msg>Adjust phases to make campaign non-editable<commit_after># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=False)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=False)
def backwards(self, orm):
"Write your backwards methods here."
orm['bb_projects.ProjectPhase'].objects.filter(slug='campaign').update(editable=True)
orm['bb_projects.ProjectPhase'].objects.filter(slug='done-complete').update(editable=True)
models = {
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['bb_projects']
symmetrical = True
|
|
1b36f7e837f6c15cab838edfaf6464bef0c88c6d
|
src/ggrc/migrations/versions/20160304124523_50c374901d42_add_request_notification_types.py
|
src/ggrc/migrations/versions/20160304124523_50c374901d42_add_request_notification_types.py
|
"""Add request notification types
Revision ID: 50c374901d42
Revises: 4e989ef86619
Create Date: 2016-03-04 12:45:23.024224
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = '50c374901d42'
down_revision = '1839dabd2357'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
op.bulk_insert(
NOTIFICATION_TYPES,
[{
"name": "request_open",
"description": ("Notify all assignees Requesters Assignees and "
"Verifiers that a new request has been created."),
"template": "request_open",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_declined",
"description": "Notify Requester that a request has been declined.",
"template": "request_declined",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_manual",
"description": "Send a manual notification to the Requester.",
"template": "request_manual",
"advance_notice": 0,
"instant": False,
}]
)
def downgrade():
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_([
"request_open",
"request_declined",
"request_manual",
])
)
)
|
Add migration for request notification types
|
Add migration for request notification types
|
Python
|
apache-2.0
|
NejcZupec/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core
|
Add migration for request notification types
|
"""Add request notification types
Revision ID: 50c374901d42
Revises: 4e989ef86619
Create Date: 2016-03-04 12:45:23.024224
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = '50c374901d42'
down_revision = '1839dabd2357'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
op.bulk_insert(
NOTIFICATION_TYPES,
[{
"name": "request_open",
"description": ("Notify all assignees Requesters Assignees and "
"Verifiers that a new request has been created."),
"template": "request_open",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_declined",
"description": "Notify Requester that a request has been declined.",
"template": "request_declined",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_manual",
"description": "Send a manual notification to the Requester.",
"template": "request_manual",
"advance_notice": 0,
"instant": False,
}]
)
def downgrade():
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_([
"request_open",
"request_declined",
"request_manual",
])
)
)
|
<commit_before><commit_msg>Add migration for request notification types<commit_after>
|
"""Add request notification types
Revision ID: 50c374901d42
Revises: 4e989ef86619
Create Date: 2016-03-04 12:45:23.024224
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = '50c374901d42'
down_revision = '1839dabd2357'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
op.bulk_insert(
NOTIFICATION_TYPES,
[{
"name": "request_open",
"description": ("Notify all assignees Requesters Assignees and "
"Verifiers that a new request has been created."),
"template": "request_open",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_declined",
"description": "Notify Requester that a request has been declined.",
"template": "request_declined",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_manual",
"description": "Send a manual notification to the Requester.",
"template": "request_manual",
"advance_notice": 0,
"instant": False,
}]
)
def downgrade():
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_([
"request_open",
"request_declined",
"request_manual",
])
)
)
|
Add migration for request notification types
"""Add request notification types
Revision ID: 50c374901d42
Revises: 4e989ef86619
Create Date: 2016-03-04 12:45:23.024224
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = '50c374901d42'
down_revision = '1839dabd2357'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
op.bulk_insert(
NOTIFICATION_TYPES,
[{
"name": "request_open",
"description": ("Notify all assignees Requesters Assignees and "
"Verifiers that a new request has been created."),
"template": "request_open",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_declined",
"description": "Notify Requester that a request has been declined.",
"template": "request_declined",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_manual",
"description": "Send a manual notification to the Requester.",
"template": "request_manual",
"advance_notice": 0,
"instant": False,
}]
)
def downgrade():
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_([
"request_open",
"request_declined",
"request_manual",
])
)
)
|
<commit_before><commit_msg>Add migration for request notification types<commit_after>
"""Add request notification types
Revision ID: 50c374901d42
Revises: 4e989ef86619
Create Date: 2016-03-04 12:45:23.024224
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = '50c374901d42'
down_revision = '1839dabd2357'
NOTIFICATION_TYPES = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer),
column('advance_notice_end', sa.Integer),
column('created_at', sa.DateTime),
column('modified_by_id', sa.Integer),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
op.bulk_insert(
NOTIFICATION_TYPES,
[{
"name": "request_open",
"description": ("Notify all assignees Requesters Assignees and "
"Verifiers that a new request has been created."),
"template": "request_open",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_declined",
"description": "Notify Requester that a request has been declined.",
"template": "request_declined",
"advance_notice": 0,
"instant": False,
}, {
"name": "request_manual",
"description": "Send a manual notification to the Requester.",
"template": "request_manual",
"advance_notice": 0,
"instant": False,
}]
)
def downgrade():
op.execute(
NOTIFICATION_TYPES.delete().where(
NOTIFICATION_TYPES.c.name.in_([
"request_open",
"request_declined",
"request_manual",
])
)
)
|
|
72b701652271178e08d9cccd088d24177d4a2fc6
|
pyblogit/database_handler.py
|
pyblogit/database_handler.py
|
"""
pyblogit.database_handler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the connection and manipulation of the local database.
"""
import sqlite3
def get_cursor(blog_id):
"""Connects to a local sqlite database"""
conn = sqlite3.connect(blog_id)
c = conn.cursor()
return c
def add_blog(blog_id, blog_name):
"""Adds a new blog to the local blogs database and
creates a new database for the blog."""
# These two statements create the database files if
# they don't exist.
c = get_cursor('blogs')
blog_c = get_cursor(blog_id)
# Check if blogs table exists, if it doesn't create it.
exists = bool(c.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="blogs"'))
if not exists:
c.execute('CREATE TABLE blogs(blog_id INT, blog_name TEXT)')
sql = ('INSERT INTO blogs(blog_id, blog_name) values({blog_id}, {blog_name})'.format(blog_id=blog_id, blog_name=blog_name))
c.execute(sql)
# Create table to store posts in new blog's database.
blog_c.execute('CREATE TABLE posts(post_id INT, title TEXT, url TEXT, status TEXT, content TEXT, updated INT)')
def get_blogs():
"""Returns all stored blogs."""
c = get_cursor('blogs')
blogs = c.execute('SELECT * FROM blogs')
return blogs
def get_post(blog_id, post_id):
"""Retrieves a post from a local database."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts WHERE post_id = {p_id}'.format(p_id=post_id)
c.execute(sql)
post = c.fetchone()
return post
def get_posts(blog_id, limit=None):
"""Retrieves all the posts from a local database, if a limit
is specified, it will retrieve up to that amount of posts."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts'
if limit:
limit = 'LIMIT {lim}'.format(lim=limit)
sql = ''.join([sql, limit])
c.execute(sql)
posts = c.fetchall()
return posts
def update_post(blog_id, post_id, post):
# TODO: update post in local database
pass
def add_post(blog_id, post):
# TODO: insert new post in local database
pass
|
Add functions for storing/getting blogs and posts
|
Add functions for storing/getting blogs and posts
|
Python
|
mit
|
jamalmoir/pyblogit
|
Add functions for storing/getting blogs and posts
|
"""
pyblogit.database_handler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the connection and manipulation of the local database.
"""
import sqlite3
def get_cursor(blog_id):
"""Connects to a local sqlite database"""
conn = sqlite3.connect(blog_id)
c = conn.cursor()
return c
def add_blog(blog_id, blog_name):
"""Adds a new blog to the local blogs database and
creates a new database for the blog."""
# These two statements create the database files if
# they don't exist.
c = get_cursor('blogs')
blog_c = get_cursor(blog_id)
# Check if blogs table exists, if it doesn't create it.
exists = bool(c.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="blogs"'))
if not exists:
c.execute('CREATE TABLE blogs(blog_id INT, blog_name TEXT)')
sql = ('INSERT INTO blogs(blog_id, blog_name) values({blog_id}, {blog_name})'.format(blog_id=blog_id, blog_name=blog_name))
c.execute(sql)
# Create table to store posts in new blog's database.
blog_c.execute('CREATE TABLE posts(post_id INT, title TEXT, url TEXT, status TEXT, content TEXT, updated INT)')
def get_blogs():
"""Returns all stored blogs."""
c = get_cursor('blogs')
blogs = c.execute('SELECT * FROM blogs')
return blogs
def get_post(blog_id, post_id):
"""Retrieves a post from a local database."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts WHERE post_id = {p_id}'.format(p_id=post_id)
c.execute(sql)
post = c.fetchone()
return post
def get_posts(blog_id, limit=None):
"""Retrieves all the posts from a local database, if a limit
is specified, it will retrieve up to that amount of posts."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts'
if limit:
limit = 'LIMIT {lim}'.format(lim=limit)
sql = ''.join([sql, limit])
c.execute(sql)
posts = c.fetchall()
return posts
def update_post(blog_id, post_id, post):
# TODO: update post in local database
pass
def add_post(blog_id, post):
# TODO: insert new post in local database
pass
|
<commit_before><commit_msg>Add functions for storing/getting blogs and posts<commit_after>
|
"""
pyblogit.database_handler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the connection and manipulation of the local database.
"""
import sqlite3
def get_cursor(blog_id):
"""Connects to a local sqlite database"""
conn = sqlite3.connect(blog_id)
c = conn.cursor()
return c
def add_blog(blog_id, blog_name):
"""Adds a new blog to the local blogs database and
creates a new database for the blog."""
# These two statements create the database files if
# they don't exist.
c = get_cursor('blogs')
blog_c = get_cursor(blog_id)
# Check if blogs table exists, if it doesn't create it.
exists = bool(c.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="blogs"'))
if not exists:
c.execute('CREATE TABLE blogs(blog_id INT, blog_name TEXT)')
sql = ('INSERT INTO blogs(blog_id, blog_name) values({blog_id}, {blog_name})'.format(blog_id=blog_id, blog_name=blog_name))
c.execute(sql)
# Create table to store posts in new blog's database.
blog_c.execute('CREATE TABLE posts(post_id INT, title TEXT, url TEXT, status TEXT, content TEXT, updated INT)')
def get_blogs():
"""Returns all stored blogs."""
c = get_cursor('blogs')
blogs = c.execute('SELECT * FROM blogs')
return blogs
def get_post(blog_id, post_id):
"""Retrieves a post from a local database."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts WHERE post_id = {p_id}'.format(p_id=post_id)
c.execute(sql)
post = c.fetchone()
return post
def get_posts(blog_id, limit=None):
"""Retrieves all the posts from a local database, if a limit
is specified, it will retrieve up to that amount of posts."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts'
if limit:
limit = 'LIMIT {lim}'.format(lim=limit)
sql = ''.join([sql, limit])
c.execute(sql)
posts = c.fetchall()
return posts
def update_post(blog_id, post_id, post):
# TODO: update post in local database
pass
def add_post(blog_id, post):
# TODO: insert new post in local database
pass
|
Add functions for storing/getting blogs and posts"""
pyblogit.database_handler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the connection and manipulation of the local database.
"""
import sqlite3
def get_cursor(blog_id):
"""Connects to a local sqlite database"""
conn = sqlite3.connect(blog_id)
c = conn.cursor()
return c
def add_blog(blog_id, blog_name):
"""Adds a new blog to the local blogs database and
creates a new database for the blog."""
# These two statements create the database files if
# they don't exist.
c = get_cursor('blogs')
blog_c = get_cursor(blog_id)
# Check if blogs table exists, if it doesn't create it.
exists = bool(c.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="blogs"'))
if not exists:
c.execute('CREATE TABLE blogs(blog_id INT, blog_name TEXT)')
sql = ('INSERT INTO blogs(blog_id, blog_name) values({blog_id}, {blog_name})'.format(blog_id=blog_id, blog_name=blog_name))
c.execute(sql)
# Create table to store posts in new blog's database.
blog_c.execute('CREATE TABLE posts(post_id INT, title TEXT, url TEXT, status TEXT, content TEXT, updated INT)')
def get_blogs():
"""Returns all stored blogs."""
c = get_cursor('blogs')
blogs = c.execute('SELECT * FROM blogs')
return blogs
def get_post(blog_id, post_id):
"""Retrieves a post from a local database."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts WHERE post_id = {p_id}'.format(p_id=post_id)
c.execute(sql)
post = c.fetchone()
return post
def get_posts(blog_id, limit=None):
"""Retrieves all the posts from a local database, if a limit
is specified, it will retrieve up to that amount of posts."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts'
if limit:
limit = 'LIMIT {lim}'.format(lim=limit)
sql = ''.join([sql, limit])
c.execute(sql)
posts = c.fetchall()
return posts
def update_post(blog_id, post_id, post):
# TODO: update post in local database
pass
def add_post(blog_id, post):
# TODO: insert new post in local database
pass
|
<commit_before><commit_msg>Add functions for storing/getting blogs and posts<commit_after>"""
pyblogit.database_handler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the connection and manipulation of the local database.
"""
import sqlite3
def get_cursor(blog_id):
"""Connects to a local sqlite database"""
conn = sqlite3.connect(blog_id)
c = conn.cursor()
return c
def add_blog(blog_id, blog_name):
"""Adds a new blog to the local blogs database and
creates a new database for the blog."""
# These two statements create the database files if
# they don't exist.
c = get_cursor('blogs')
blog_c = get_cursor(blog_id)
# Check if blogs table exists, if it doesn't create it.
exists = bool(c.execute('SELECT name FROM sqlite_master WHERE type="table" AND name="blogs"'))
if not exists:
c.execute('CREATE TABLE blogs(blog_id INT, blog_name TEXT)')
sql = ('INSERT INTO blogs(blog_id, blog_name) values({blog_id}, {blog_name})'.format(blog_id=blog_id, blog_name=blog_name))
c.execute(sql)
# Create table to store posts in new blog's database.
blog_c.execute('CREATE TABLE posts(post_id INT, title TEXT, url TEXT, status TEXT, content TEXT, updated INT)')
def get_blogs():
"""Returns all stored blogs."""
c = get_cursor('blogs')
blogs = c.execute('SELECT * FROM blogs')
return blogs
def get_post(blog_id, post_id):
"""Retrieves a post from a local database."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts WHERE post_id = {p_id}'.format(p_id=post_id)
c.execute(sql)
post = c.fetchone()
return post
def get_posts(blog_id, limit=None):
"""Retrieves all the posts from a local database, if a limit
is specified, it will retrieve up to that amount of posts."""
c = get_cursor(blog_id)
sql = 'SELECT * FROM posts'
if limit:
limit = 'LIMIT {lim}'.format(lim=limit)
sql = ''.join([sql, limit])
c.execute(sql)
posts = c.fetchall()
return posts
def update_post(blog_id, post_id, post):
# TODO: update post in local database
pass
def add_post(blog_id, post):
# TODO: insert new post in local database
pass
|
|
726316b50209dfc5f6a8f6373cd7e3f53e267bb3
|
geodj/genre_parser.py
|
geodj/genre_parser.py
|
import re
from django.utils.encoding import smart_str
class GenreParser:
@staticmethod
def parse(genre):
genre = smart_str(genre).lower()
if re.search(r"\b(jazz|blues)\b", genre):
return "jazz"
if re.search(r"\b(ska|reggae|ragga|dub)\b", genre):
return "ska"
elif re.search(r"\b(r&b|funk|soul)\b", genre):
return "r&b"
elif re.search(r"\bfolk\b", genre):
return "folk"
elif re.search(r"\b(country|bluegrass)\b", genre):
return "country"
elif re.search(r"\b(rap|hip hop|crunk|trip hop)\b", genre):
return "hiphop"
elif re.search(r"\bpop\b", genre):
return "pop"
elif re.search(r"\b(rock|metal|punk)\b", genre):
return "rock"
elif re.search(r"\b(electronic|electronica|electro|house|techno|ambient|chiptune|industrial|downtempo|drum and bass|trance|dubstep)\b", genre):
return "electronic"
elif re.search(r"\b(classical|orchestra|opera|piano|violin|cello)\b", genre):
return "classical"
|
Implement a genre string parser
|
Implement a genre string parser
|
Python
|
mit
|
6/GeoDJ,6/GeoDJ
|
Implement a genre string parser
|
import re
from django.utils.encoding import smart_str
class GenreParser:
@staticmethod
def parse(genre):
genre = smart_str(genre).lower()
if re.search(r"\b(jazz|blues)\b", genre):
return "jazz"
if re.search(r"\b(ska|reggae|ragga|dub)\b", genre):
return "ska"
elif re.search(r"\b(r&b|funk|soul)\b", genre):
return "r&b"
elif re.search(r"\bfolk\b", genre):
return "folk"
elif re.search(r"\b(country|bluegrass)\b", genre):
return "country"
elif re.search(r"\b(rap|hip hop|crunk|trip hop)\b", genre):
return "hiphop"
elif re.search(r"\bpop\b", genre):
return "pop"
elif re.search(r"\b(rock|metal|punk)\b", genre):
return "rock"
elif re.search(r"\b(electronic|electronica|electro|house|techno|ambient|chiptune|industrial|downtempo|drum and bass|trance|dubstep)\b", genre):
return "electronic"
elif re.search(r"\b(classical|orchestra|opera|piano|violin|cello)\b", genre):
return "classical"
|
<commit_before><commit_msg>Implement a genre string parser<commit_after>
|
import re
from django.utils.encoding import smart_str
class GenreParser:
@staticmethod
def parse(genre):
genre = smart_str(genre).lower()
if re.search(r"\b(jazz|blues)\b", genre):
return "jazz"
if re.search(r"\b(ska|reggae|ragga|dub)\b", genre):
return "ska"
elif re.search(r"\b(r&b|funk|soul)\b", genre):
return "r&b"
elif re.search(r"\bfolk\b", genre):
return "folk"
elif re.search(r"\b(country|bluegrass)\b", genre):
return "country"
elif re.search(r"\b(rap|hip hop|crunk|trip hop)\b", genre):
return "hiphop"
elif re.search(r"\bpop\b", genre):
return "pop"
elif re.search(r"\b(rock|metal|punk)\b", genre):
return "rock"
elif re.search(r"\b(electronic|electronica|electro|house|techno|ambient|chiptune|industrial|downtempo|drum and bass|trance|dubstep)\b", genre):
return "electronic"
elif re.search(r"\b(classical|orchestra|opera|piano|violin|cello)\b", genre):
return "classical"
|
Implement a genre string parserimport re
from django.utils.encoding import smart_str
class GenreParser:
@staticmethod
def parse(genre):
genre = smart_str(genre).lower()
if re.search(r"\b(jazz|blues)\b", genre):
return "jazz"
if re.search(r"\b(ska|reggae|ragga|dub)\b", genre):
return "ska"
elif re.search(r"\b(r&b|funk|soul)\b", genre):
return "r&b"
elif re.search(r"\bfolk\b", genre):
return "folk"
elif re.search(r"\b(country|bluegrass)\b", genre):
return "country"
elif re.search(r"\b(rap|hip hop|crunk|trip hop)\b", genre):
return "hiphop"
elif re.search(r"\bpop\b", genre):
return "pop"
elif re.search(r"\b(rock|metal|punk)\b", genre):
return "rock"
elif re.search(r"\b(electronic|electronica|electro|house|techno|ambient|chiptune|industrial|downtempo|drum and bass|trance|dubstep)\b", genre):
return "electronic"
elif re.search(r"\b(classical|orchestra|opera|piano|violin|cello)\b", genre):
return "classical"
|
<commit_before><commit_msg>Implement a genre string parser<commit_after>import re
from django.utils.encoding import smart_str
class GenreParser:
@staticmethod
def parse(genre):
genre = smart_str(genre).lower()
if re.search(r"\b(jazz|blues)\b", genre):
return "jazz"
if re.search(r"\b(ska|reggae|ragga|dub)\b", genre):
return "ska"
elif re.search(r"\b(r&b|funk|soul)\b", genre):
return "r&b"
elif re.search(r"\bfolk\b", genre):
return "folk"
elif re.search(r"\b(country|bluegrass)\b", genre):
return "country"
elif re.search(r"\b(rap|hip hop|crunk|trip hop)\b", genre):
return "hiphop"
elif re.search(r"\bpop\b", genre):
return "pop"
elif re.search(r"\b(rock|metal|punk)\b", genre):
return "rock"
elif re.search(r"\b(electronic|electronica|electro|house|techno|ambient|chiptune|industrial|downtempo|drum and bass|trance|dubstep)\b", genre):
return "electronic"
elif re.search(r"\b(classical|orchestra|opera|piano|violin|cello)\b", genre):
return "classical"
|
|
d5c7d429be93a2b2de4a1c09bd73f72c02664499
|
experimental/directshow.py
|
experimental/directshow.py
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
Move win32 audio experiment to trunk.
|
Move win32 audio experiment to trunk.
|
Python
|
bsd-3-clause
|
adamlwgriffiths/Pyglet,adamlwgriffiths/Pyglet,seeminglee/pyglet64,niklaskorz/pyglet,niklaskorz/pyglet,niklaskorz/pyglet,adamlwgriffiths/Pyglet,seeminglee/pyglet64,adamlwgriffiths/Pyglet,seeminglee/pyglet64,niklaskorz/pyglet
|
Move win32 audio experiment to trunk.
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
<commit_before><commit_msg>Move win32 audio experiment to trunk.<commit_after>
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
Move win32 audio experiment to trunk.#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
<commit_before><commit_msg>Move win32 audio experiment to trunk.<commit_after>#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
|
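A rough, untested sketch of the same playback flow with basic argument checking and try/finally cleanup. It uses only the graph-building calls already shown in the record plus IMediaControl.Stop, and it still polls rather than waiting on IMediaEvent:

#!/usr/bin/python
# Sketch only: same graph-building calls as above, wrapped so the COM
# pointers are dropped even if playback is interrupted.
import sys
import time
from comtypes import client

CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'

def play(filename):
    qedit = client.GetModule('qedit.dll')
    quartz = client.GetModule('quartz.dll')
    graph = client.CreateObject(CLSID_FilterGraph, interface=qedit.IFilterGraph)
    builder = graph.QueryInterface(qedit.IGraphBuilder)
    control = graph.QueryInterface(quartz.IMediaControl)
    try:
        builder.RenderFile(filename, None)
        control.Run()
        while True:            # still a poll; IMediaEvent would be cleaner
            time.sleep(1)
    except KeyboardInterrupt:
        control.Stop()         # stop the graph on Ctrl+C
    finally:
        del control, builder, graph   # release before interpreter exit

if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.exit('usage: directshow.py <media file>')
    play(sys.argv[1])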
0a0d31077746e69bf5acc7d90fa388e121544339
|
script_skeleton.py
|
script_skeleton.py
|
#!/usr/bin/python
"""Usage:
<SCRIPT_NAME> [--log-level=<log-level>]
-h --help
Show this message.
-v --version
Show version.
--log-level=<log-level>
Set logging level (one of {log_level_vals}) [default: info].
"""
import docopt
import ordutils.log as log
import ordutils.options as opt
import schema
import sys
LOG_LEVEL = "--log-level"
LOG_LEVEL_VALS = str(log.LEVELS.keys())
def validate_command_line_options(options):
# Validate command-line options
try:
opt.validate_dict_option(
options[LOG_LEVEL], log.LEVELS, "Invalid log level")
except schema.SchemaError as exc:
exit(exc.code)
def main(docstring):
# Read in and validate command line options
options = docopt.docopt(docstring, version="<SCRIPT_NAME> v0.1")
validate_command_line_options(options)
# Set up logger
logger = log.getLogger(sys.stderr, options[LOG_LEVEL])
# Rest of script...
if __name__ == "__main__":
main(__doc__)
|
Add skeleton for new python scripts.
|
Add skeleton for new python scripts.
|
Python
|
mit
|
lweasel/misc_bioinf,lweasel/misc_bioinf,lweasel/misc_bioinf
|
Add skeleton for new python scripts.
|
#!/usr/bin/python
"""Usage:
<SCRIPT_NAME> [--log-level=<log-level>]
-h --help
Show this message.
-v --version
Show version.
--log-level=<log-level>
Set logging level (one of {log_level_vals}) [default: info].
"""
import docopt
import ordutils.log as log
import ordutils.options as opt
import schema
import sys
LOG_LEVEL = "--log-level"
LOG_LEVEL_VALS = str(log.LEVELS.keys())
def validate_command_line_options(options):
# Validate command-line options
try:
opt.validate_dict_option(
options[LOG_LEVEL], log.LEVELS, "Invalid log level")
except schema.SchemaError as exc:
exit(exc.code)
def main(docstring):
# Read in and validate command line options
options = docopt.docopt(docstring, version="<SCRIPT_NAME> v0.1")
validate_command_line_options(options)
# Set up logger
logger = log.getLogger(sys.stderr, options[LOG_LEVEL])
# Rest of script...
if __name__ == "__main__":
main(__doc__)
|
<commit_before><commit_msg>Add skeleton for new python scripts.<commit_after>
|
#!/usr/bin/python
"""Usage:
<SCRIPT_NAME> [--log-level=<log-level>]
-h --help
Show this message.
-v --version
Show version.
--log-level=<log-level>
Set logging level (one of {log_level_vals}) [default: info].
"""
import docopt
import ordutils.log as log
import ordutils.options as opt
import schema
import sys
LOG_LEVEL = "--log-level"
LOG_LEVEL_VALS = str(log.LEVELS.keys())
def validate_command_line_options(options):
# Validate command-line options
try:
opt.validate_dict_option(
options[LOG_LEVEL], log.LEVELS, "Invalid log level")
except schema.SchemaError as exc:
exit(exc.code)
def main(docstring):
# Read in and validate command line options
options = docopt.docopt(docstring, version="<SCRIPT_NAME> v0.1")
validate_command_line_options(options)
# Set up logger
logger = log.getLogger(sys.stderr, options[LOG_LEVEL])
# Rest of script...
if __name__ == "__main__":
main(__doc__)
|
Add skeleton for new python scripts.#!/usr/bin/python
"""Usage:
<SCRIPT_NAME> [--log-level=<log-level>]
-h --help
Show this message.
-v --version
Show version.
--log-level=<log-level>
Set logging level (one of {log_level_vals}) [default: info].
"""
import docopt
import ordutils.log as log
import ordutils.options as opt
import schema
import sys
LOG_LEVEL = "--log-level"
LOG_LEVEL_VALS = str(log.LEVELS.keys())
def validate_command_line_options(options):
# Validate command-line options
try:
opt.validate_dict_option(
options[LOG_LEVEL], log.LEVELS, "Invalid log level")
except schema.SchemaError as exc:
exit(exc.code)
def main(docstring):
# Read in and validate command line options
options = docopt.docopt(docstring, version="<SCRIPT_NAME> v0.1")
validate_command_line_options(options)
# Set up logger
logger = log.getLogger(sys.stderr, options[LOG_LEVEL])
# Rest of script...
if __name__ == "__main__":
main(__doc__)
|
<commit_before><commit_msg>Add skeleton for new python scripts.<commit_after>#!/usr/bin/python
"""Usage:
<SCRIPT_NAME> [--log-level=<log-level>]
-h --help
Show this message.
-v --version
Show version.
--log-level=<log-level>
Set logging level (one of {log_level_vals}) [default: info].
"""
import docopt
import ordutils.log as log
import ordutils.options as opt
import schema
import sys
LOG_LEVEL = "--log-level"
LOG_LEVEL_VALS = str(log.LEVELS.keys())
def validate_command_line_options(options):
# Validate command-line options
try:
opt.validate_dict_option(
options[LOG_LEVEL], log.LEVELS, "Invalid log level")
except schema.SchemaError as exc:
exit(exc.code)
def main(docstring):
# Read in and validate command line options
options = docopt.docopt(docstring, version="<SCRIPT_NAME> v0.1")
validate_command_line_options(options)
# Set up logger
logger = log.getLogger(sys.stderr, options[LOG_LEVEL])
# Rest of script...
if __name__ == "__main__":
main(__doc__)
|
|
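One gap in the skeleton above is that LOG_LEVEL_VALS is never substituted into the {log_level_vals} placeholder, so --help would print the raw placeholder. A small sketch of how a script built on the skeleton might fill it before parsing (assumes the same module-level names):

# Fill the usage string's placeholder so docopt's --help output lists
# the accepted log levels, then hand the formatted text to main().
usage = __doc__.format(log_level_vals=LOG_LEVEL_VALS)

if __name__ == "__main__":
    main(usage)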
73f75483156056b61f3b6bec4fe2f09522c2c34a
|
test/integration/ggrc/models/test_eager_query.py
|
test/integration/ggrc/models/test_eager_query.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Tests for making sure eager queries are working on all mixins."""
from ggrc.models import all_models
from ggrc.models import mixins
from integration.ggrc import TestCase
class TestAllModels(TestCase):
"""Test basic model structure for all models"""
def test_all_model_mro(self):
"""Test the correct mixin order for eager queries.
This test checks that all models that have an eager query, have the last
mixin in the mro Identifiable. If there are any other mixins with eager
query after it, the eager query on those is ignored and that is an error.
"""
errors = set()
for model in all_models.all_models:
eager = [mixin for mixin in model.mro()
if hasattr(mixin, "eager_query")]
if eager:
try:
self.assertEqual(
eager[-1], mixins.Identifiable,
"Model {}, has wrong mixin order. The last mixin with "
"eager_query is '{}' instead of 'Identifiable'.".format(
model.__name__, eager[-1].__name__),
)
except AssertionError as error:
errors.add(error)
self.assertEqual(set(), errors)
|
Add tests for mixin order
|
Add tests for mixin order
Test that mixin order on models does not hide any eager queries.
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core
|
Add tests for mixin order
Test that mixin order on models does not hide any eager queries.
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Tests for making sure eager queries are working on all mixins."""
from ggrc.models import all_models
from ggrc.models import mixins
from integration.ggrc import TestCase
class TestAllModels(TestCase):
"""Test basic model structure for all models"""
def test_all_model_mro(self):
"""Test the correct mixin order for eager queries.
This test checks that all models that have an eager query, have the last
mixin in the mro Identifiable. If there are any other mixins with eager
query after it, the eager query on those is ignored and that is an error.
"""
errors = set()
for model in all_models.all_models:
eager = [mixin for mixin in model.mro()
if hasattr(mixin, "eager_query")]
if eager:
try:
self.assertEqual(
eager[-1], mixins.Identifiable,
"Model {}, has wrong mixin order. The last mixin with "
"eager_query is '{}' instead of 'Identifiable'.".format(
model.__name__, eager[-1].__name__),
)
except AssertionError as error:
errors.add(error)
self.assertEqual(set(), errors)
|
<commit_before><commit_msg>Add tests for mixin order
Test that mixin order on models does not hide any eager queries.<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Tests for making sure eager queries are working on all mixins."""
from ggrc.models import all_models
from ggrc.models import mixins
from integration.ggrc import TestCase
class TestAllModels(TestCase):
"""Test basic model structure for all models"""
def test_all_model_mro(self):
"""Test the correct mixin order for eager queries.
This test checks that all models that have an eager query, have the last
mixin in the mro Identifiable. If there are any other mixins with eager
query after it, the eager query on those is ignored and that is an error.
"""
errors = set()
for model in all_models.all_models:
eager = [mixin for mixin in model.mro()
if hasattr(mixin, "eager_query")]
if eager:
try:
self.assertEqual(
eager[-1], mixins.Identifiable,
"Model {}, has wrong mixin order. The last mixin with "
"eager_query is '{}' instead of 'Identifiable'.".format(
model.__name__, eager[-1].__name__),
)
except AssertionError as error:
errors.add(error)
self.assertEqual(set(), errors)
|
Add tests for mixin order
Test that mixin order on models does not hide any eager queries.# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Tests for making sure eager queries are working on all mixins."""
from ggrc.models import all_models
from ggrc.models import mixins
from integration.ggrc import TestCase
class TestAllModels(TestCase):
"""Test basic model structure for all models"""
def test_all_model_mro(self):
"""Test the correct mixin order for eager queries.
This test checks that all models that have an eager query, have the last
mixin in the mro Identifiable. If there are any other mixins with eager
query after it, the eager query on those is ignored and that is an error.
"""
errors = set()
for model in all_models.all_models:
eager = [mixin for mixin in model.mro()
if hasattr(mixin, "eager_query")]
if eager:
try:
self.assertEqual(
eager[-1], mixins.Identifiable,
"Model {}, has wrong mixin order. The last mixin with "
"eager_query is '{}' instead of 'Identifiable'.".format(
model.__name__, eager[-1].__name__),
)
except AssertionError as error:
errors.add(error)
self.assertEqual(set(), errors)
|
<commit_before><commit_msg>Add tests for mixin order
Test that mixin order on models does not hide any eager queries.<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Tests for making sure eager queries are working on all mixins."""
from ggrc.models import all_models
from ggrc.models import mixins
from integration.ggrc import TestCase
class TestAllModels(TestCase):
"""Test basic model structure for all models"""
def test_all_model_mro(self):
"""Test the correct mixin order for eager queries.
This test checks that all models that have an eager query, have the last
mixin in the mro Identifiable. If there are any other mixins with eager
query after it, the eager query on those is ignored and that is an error.
"""
errors = set()
for model in all_models.all_models:
eager = [mixin for mixin in model.mro()
if hasattr(mixin, "eager_query")]
if eager:
try:
self.assertEqual(
eager[-1], mixins.Identifiable,
"Model {}, has wrong mixin order. The last mixin with "
"eager_query is '{}' instead of 'Identifiable'.".format(
model.__name__, eager[-1].__name__),
)
except AssertionError as error:
errors.add(error)
self.assertEqual(set(), errors)
|
|
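A self-contained sketch (with made-up mixins) of the mro() filtering the test above performs, showing one base ordering that satisfies the check and one that would fail it:

# The test collects every class in a model's MRO that defines
# eager_query and asserts the last one is Identifiable.
class Identifiable(object):
    @classmethod
    def eager_query(cls):
        return "base query"

class Titled(object):
    @classmethod
    def eager_query(cls):
        return "titled query"

class GoodModel(Titled, Identifiable):   # Identifiable resolved last
    pass

class BadModel(Identifiable, Titled):    # Titled ends up after Identifiable
    pass

for model in (GoodModel, BadModel):
    eager = [m for m in model.mro() if hasattr(m, "eager_query")]
    print("{}: last eager mixin is {}".format(model.__name__, eager[-1].__name__))
# GoodModel passes the check; BadModel would trip the assertion.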
6ed99163b10209566a0575a9a67d1ab2ad552fd9
|
tests/views/test_committee_subscriptions_page.py
|
tests/views/test_committee_subscriptions_page.py
|
import datetime
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
THIS_YEAR = datetime.datetime.today().year
class TestCommitteeSubscriptionsPage(PMGLiveServerTestCase):
def test_committee_subscriptions_page(self):
"""
Test committee subscriptions page (/committee-subscriptions)
"""
self.make_request("/committee-subscriptions", follow_redirects=True)
self.assertIn(
"Access to meeting reports for premium committees from before {} is freely accessible to everyone.".format(
THIS_YEAR - 1
),
self.html,
)
|
Add test for committee subscriptions page
|
Add test for committee subscriptions page
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add test for committee subscriptions page
|
import datetime
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
THIS_YEAR = datetime.datetime.today().year
class TestCommitteeSubscriptionsPage(PMGLiveServerTestCase):
def test_committee_subscriptions_page(self):
"""
Test committee subscriptions page (/committee-subscriptions)
"""
self.make_request("/committee-subscriptions", follow_redirects=True)
self.assertIn(
"Access to meeting reports for premium committees from before {} is freely accessible to everyone.".format(
THIS_YEAR - 1
),
self.html,
)
|
<commit_before><commit_msg>Add test for committee subscriptions page<commit_after>
|
import datetime
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
THIS_YEAR = datetime.datetime.today().year
class TestCommitteeSubscriptionsPage(PMGLiveServerTestCase):
def test_committee_subscriptions_page(self):
"""
Test committee subscriptions page (/committee-subscriptions)
"""
self.make_request("/committee-subscriptions", follow_redirects=True)
self.assertIn(
"Access to meeting reports for premium committees from before {} is freely accessible to everyone.".format(
THIS_YEAR - 1
),
self.html,
)
|
Add test for committee subscriptions pageimport datetime
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
THIS_YEAR = datetime.datetime.today().year
class TestCommitteeSubscriptionsPage(PMGLiveServerTestCase):
def test_committee_subscriptions_page(self):
"""
Test committee subscriptions page (/committee-subscriptions)
"""
self.make_request("/committee-subscriptions", follow_redirects=True)
self.assertIn(
"Access to meeting reports for premium committees from before {} is freely accessible to everyone.".format(
THIS_YEAR - 1
),
self.html,
)
|
<commit_before><commit_msg>Add test for committee subscriptions page<commit_after>import datetime
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
THIS_YEAR = datetime.datetime.today().year
class TestCommitteeSubscriptionsPage(PMGLiveServerTestCase):
def test_committee_subscriptions_page(self):
"""
Test committee subscriptions page (/committee-subscriptions)
"""
self.make_request("/committee-subscriptions", follow_redirects=True)
self.assertIn(
"Access to meeting reports for premium committees from before {} is freely accessible to everyone.".format(
THIS_YEAR - 1
),
self.html,
)
|
|
d94123ba898032e7837aa8a2fd0fe585ed81e2d5
|
scrapi/processing/storage.py
|
scrapi/processing/storage.py
|
import os
import json
from scrapi.processing.base import BaseProcessor
class StorageProcessor(BaseProcessor):
NAME = 'storage'
def process_raw(self, raw):
filename = 'archive/{}/{}/raw.{}'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(raw.attributes, indent=4))
def process_normalized(self, raw, normalized):
filename = 'archive/{}/{}/normalized.json'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(normalized.attributes, indent=4))
|
Add back a filesystem backend for testing and development
|
Add back a filesystem backend for testing and development
|
Python
|
apache-2.0
|
ostwald/scrapi,CenterForOpenScience/scrapi,felliott/scrapi,fabianvf/scrapi,icereval/scrapi,erinspace/scrapi,felliott/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,mehanig/scrapi,fabianvf/scrapi,alexgarciac/scrapi,jeffreyliu3230/scrapi,erinspace/scrapi
|
Add back a filesystem backend for testing and development
|
import os
import json
from scrapi.processing.base import BaseProcessor
class StorageProcessor(BaseProcessor):
NAME = 'storage'
def process_raw(self, raw):
filename = 'archive/{}/{}/raw.{}'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(raw.attributes, indent=4))
def process_normalized(self, raw, normalized):
filename = 'archive/{}/{}/normalized.json'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(normalized.attributes, indent=4))
|
<commit_before><commit_msg>Add back a filesystem backend for testing and development<commit_after>
|
import os
import json
from scrapi.processing.base import BaseProcessor
class StorageProcessor(BaseProcessor):
NAME = 'storage'
def process_raw(self, raw):
filename = 'archive/{}/{}/raw.{}'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(raw.attributes, indent=4))
def process_normalized(self, raw, normalized):
filename = 'archive/{}/{}/normalized.json'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(normalized.attributes, indent=4))
|
Add back a filesystem backend for testing and developmentimport os
import json
from scrapi.processing.base import BaseProcessor
class StorageProcessor(BaseProcessor):
NAME = 'storage'
def process_raw(self, raw):
filename = 'archive/{}/{}/raw.{}'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(raw.attributes, indent=4))
def process_normalized(self, raw, normalized):
filename = 'archive/{}/{}/normalized.json'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(normalized.attributes, indent=4))
|
<commit_before><commit_msg>Add back a filesystem backend for testing and development<commit_after>import os
import json
from scrapi.processing.base import BaseProcessor
class StorageProcessor(BaseProcessor):
NAME = 'storage'
def process_raw(self, raw):
filename = 'archive/{}/{}/raw.{}'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(raw.attributes, indent=4))
def process_normalized(self, raw, normalized):
filename = 'archive/{}/{}/normalized.json'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(normalized.attributes, indent=4))
|
|
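A small usage sketch for the processor above. The RawDoc stand-in is hypothetical and only mimics the item access plus .attributes that process_raw needs; it also assumes the base processor takes no constructor arguments:

# Minimal stand-in for a harvested document: supports raw['key'] access
# and exposes an .attributes dict for JSON serialisation.
class RawDoc(dict):
    @property
    def attributes(self):
        return dict(self)

raw = RawDoc(source='example_source', docID='doc-1', filetype='json',
             title='A harvested record')
StorageProcessor().process_raw(raw)
# -> writes archive/example_source/doc-1/raw.json containing the attributes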
b2d60408688cc1bf27842d8744d1048a64b00e94
|
scripts/staff_public_regs.py
|
scripts/staff_public_regs.py
|
# -*- coding: utf-8 -*-
"""Get public registrations for staff members.
python -m scripts.staff_public_regs
"""
from collections import defaultdict
import logging
from modularodm import Q
from website.models import Node, User
from website.app import init_app
logger = logging.getLogger('staff_public_regs')
STAFF_GUIDS = [
'jk5cv', # Jeff
'cdi38', # Brian
'edb8y', # Johanna
'hsey5', # Courtney
'5hdme', # Melissa
]
def main():
init_app(set_backends=True, routes=False, mfr=False)
staff_registrations = defaultdict(list)
users = [User.load(each) for each in STAFF_GUIDS]
for registration in Node.find(Q('is_registration', 'eq', True) & Q('is_public', 'eq', True)):
for user in users:
if registration in user.node__contributed:
staff_registrations[user._id].append(registration)
for uid in staff_registrations:
user = User.load(uid)
user_regs = staff_registrations[uid]
logger.info('{} ({}) on {} Public Registrations:'.format(
user.fullname,
user._id,
len(user_regs))
)
for registration in user_regs:
logger.info('\t{} ({}): {}'.format(registration.title,
registration._id,
registration.absolute_url)
)
if __name__ == '__main__':
main()
|
Add script to get public registrations for staff members
|
Add script to get public registrations for staff members
[skip ci]
|
Python
|
apache-2.0
|
KAsante95/osf.io,jolene-esposito/osf.io,HarryRybacki/osf.io,jinluyuan/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,amyshi188/osf.io,caseyrygt/osf.io,haoyuchen1992/osf.io,jinluyuan/osf.io,laurenrevere/osf.io,SSJohns/osf.io,lyndsysimon/osf.io,HalcyonChimera/osf.io,amyshi188/osf.io,cslzchen/osf.io,zamattiac/osf.io,mluo613/osf.io,asanfilippo7/osf.io,jolene-esposito/osf.io,himanshuo/osf.io,DanielSBrown/osf.io,zamattiac/osf.io,monikagrabowska/osf.io,arpitar/osf.io,icereval/osf.io,emetsger/osf.io,chrisseto/osf.io,GaryKriebel/osf.io,doublebits/osf.io,MerlinZhang/osf.io,mluke93/osf.io,himanshuo/osf.io,arpitar/osf.io,lyndsysimon/osf.io,barbour-em/osf.io,jeffreyliu3230/osf.io,jeffreyliu3230/osf.io,samanehsan/osf.io,jnayak1/osf.io,TomBaxter/osf.io,alexschiller/osf.io,amyshi188/osf.io,brandonPurvis/osf.io,danielneis/osf.io,dplorimer/osf,caseyrygt/osf.io,jmcarp/osf.io,barbour-em/osf.io,monikagrabowska/osf.io,doublebits/osf.io,GageGaskins/osf.io,TomBaxter/osf.io,cwisecarver/osf.io,brandonPurvis/osf.io,jmcarp/osf.io,GageGaskins/osf.io,Johnetordoff/osf.io,kch8qx/osf.io,billyhunt/osf.io,billyhunt/osf.io,wearpants/osf.io,chennan47/osf.io,aaxelb/osf.io,bdyetton/prettychart,zachjanicki/osf.io,lamdnhan/osf.io,abought/osf.io,kwierman/osf.io,cwisecarver/osf.io,jeffreyliu3230/osf.io,abought/osf.io,samanehsan/osf.io,wearpants/osf.io,zkraime/osf.io,caneruguz/osf.io,rdhyee/osf.io,CenterForOpenScience/osf.io,reinaH/osf.io,acshi/osf.io,acshi/osf.io,danielneis/osf.io,felliott/osf.io,lamdnhan/osf.io,adlius/osf.io,SSJohns/osf.io,zkraime/osf.io,chennan47/osf.io,asanfilippo7/osf.io,chrisseto/osf.io,HarryRybacki/osf.io,ZobairAlijan/osf.io,GageGaskins/osf.io,petermalcolm/osf.io,GaryKriebel/osf.io,njantrania/osf.io,mluo613/osf.io,reinaH/osf.io,ticklemepierce/osf.io,mluo613/osf.io,GageGaskins/osf.io,crcresearch/osf.io,jmcarp/osf.io,mattclark/osf.io,GaryKriebel/osf.io,zachjanicki/osf.io,caneruguz/osf.io,ticklemepierce/osf.io,lamdnhan/osf.io,kwierman/osf.io,SSJohns/osf.io,amyshi188/osf.io,dplorimer/osf,caseyrygt/osf.io,sbt9uc/osf.io,brandonPurvis/osf.io,arpitar/osf.io,monikagrabowska/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,icereval/osf.io,caseyrygt/osf.io,KAsante95/osf.io,jnayak1/osf.io,saradbowman/osf.io,njantrania/osf.io,jinluyuan/osf.io,sloria/osf.io,Ghalko/osf.io,petermalcolm/osf.io,monikagrabowska/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,RomanZWang/osf.io,TomHeatwole/osf.io,zkraime/osf.io,jolene-esposito/osf.io,samchrisinger/osf.io,samanehsan/osf.io,haoyuchen1992/osf.io,aaxelb/osf.io,RomanZWang/osf.io,mluke93/osf.io,binoculars/osf.io,erinspace/osf.io,himanshuo/osf.io,mluo613/osf.io,caneruguz/osf.io,crcresearch/osf.io,cslzchen/osf.io,DanielSBrown/osf.io,reinaH/osf.io,adlius/osf.io,hmoco/osf.io,acshi/osf.io,zamattiac/osf.io,pattisdr/osf.io,mfraezz/osf.io,jolene-esposito/osf.io,njantrania/osf.io,emetsger/osf.io,CenterForOpenScience/osf.io,Ghalko/osf.io,alexschiller/osf.io,KAsante95/osf.io,KAsante95/osf.io,RomanZWang/osf.io,arpitar/osf.io,chrisseto/osf.io,revanthkolli/osf.io,zachjanicki/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,lamdnhan/osf.io,acshi/osf.io,TomBaxter/osf.io,mfraezz/osf.io,kwierman/osf.io,mfraezz/osf.io,lyndsysimon/osf.io,kch8qx/osf.io,TomHeatwole/osf.io,lyndsysimon/osf.io,Ghalko/osf.io,njantrania/osf.io,pattisdr/osf.io,ZobairAlijan/osf.io,SSJohns/osf.io,danielneis/osf.io,samchrisinger/osf.io,MerlinZhang/osf.io,rdhyee/osf.io,TomHeatwole/osf.io,himanshuo/osf.io,crcresearch/osf.io,HarryRybacki/osf.io,HalcyonChimera/osf.io,revanthkolli/osf.io,sloria/osf.io,casey
rollins/osf.io,alexschiller/osf.io,haoyuchen1992/osf.io,jmcarp/osf.io,chennan47/osf.io,KAsante95/osf.io,laurenrevere/osf.io,felliott/osf.io,Johnetordoff/osf.io,felliott/osf.io,brandonPurvis/osf.io,ticklemepierce/osf.io,icereval/osf.io,binoculars/osf.io,bdyetton/prettychart,fabianvf/osf.io,zachjanicki/osf.io,revanthkolli/osf.io,mattclark/osf.io,ckc6cz/osf.io,jinluyuan/osf.io,bdyetton/prettychart,emetsger/osf.io,abought/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,haoyuchen1992/osf.io,jnayak1/osf.io,barbour-em/osf.io,alexschiller/osf.io,samanehsan/osf.io,mattclark/osf.io,cslzchen/osf.io,cosenal/osf.io,dplorimer/osf,baylee-d/osf.io,Nesiehr/osf.io,doublebits/osf.io,rdhyee/osf.io,cwisecarver/osf.io,sbt9uc/osf.io,billyhunt/osf.io,ckc6cz/osf.io,hmoco/osf.io,MerlinZhang/osf.io,bdyetton/prettychart,mfraezz/osf.io,aaxelb/osf.io,mluke93/osf.io,fabianvf/osf.io,ZobairAlijan/osf.io,Johnetordoff/osf.io,ckc6cz/osf.io,sloria/osf.io,GageGaskins/osf.io,RomanZWang/osf.io,kch8qx/osf.io,ckc6cz/osf.io,adlius/osf.io,acshi/osf.io,Nesiehr/osf.io,pattisdr/osf.io,erinspace/osf.io,hmoco/osf.io,aaxelb/osf.io,barbour-em/osf.io,caseyrollins/osf.io,fabianvf/osf.io,zkraime/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,leb2dg/osf.io,doublebits/osf.io,caneruguz/osf.io,sbt9uc/osf.io,leb2dg/osf.io,RomanZWang/osf.io,binoculars/osf.io,billyhunt/osf.io,GaryKriebel/osf.io,jeffreyliu3230/osf.io,laurenrevere/osf.io,cosenal/osf.io,cldershem/osf.io,samchrisinger/osf.io,cwisecarver/osf.io,revanthkolli/osf.io,reinaH/osf.io,kwierman/osf.io,cldershem/osf.io,mluke93/osf.io,rdhyee/osf.io,asanfilippo7/osf.io,ZobairAlijan/osf.io,chrisseto/osf.io,samchrisinger/osf.io,saradbowman/osf.io,felliott/osf.io,caseyrollins/osf.io,cslzchen/osf.io,fabianvf/osf.io,ticklemepierce/osf.io,erinspace/osf.io,sbt9uc/osf.io,baylee-d/osf.io,brandonPurvis/osf.io,Nesiehr/osf.io,jnayak1/osf.io,petermalcolm/osf.io,cosenal/osf.io,DanielSBrown/osf.io,asanfilippo7/osf.io,cosenal/osf.io,emetsger/osf.io,kch8qx/osf.io,MerlinZhang/osf.io,wearpants/osf.io,cldershem/osf.io,petermalcolm/osf.io,brianjgeiger/osf.io,TomHeatwole/osf.io,wearpants/osf.io,zamattiac/osf.io,abought/osf.io,kch8qx/osf.io,cldershem/osf.io,dplorimer/osf,hmoco/osf.io,billyhunt/osf.io,doublebits/osf.io,Ghalko/osf.io,HarryRybacki/osf.io,danielneis/osf.io,Nesiehr/osf.io
|
Add script to get public registrations for staff members
[skip ci]
|
# -*- coding: utf-8 -*-
"""Get public registrations for staff members.
python -m scripts.staff_public_regs
"""
from collections import defaultdict
import logging
from modularodm import Q
from website.models import Node, User
from website.app import init_app
logger = logging.getLogger('staff_public_regs')
STAFF_GUIDS = [
'jk5cv', # Jeff
'cdi38', # Brian
'edb8y', # Johanna
'hsey5', # Courtney
'5hdme', # Melissa
]
def main():
init_app(set_backends=True, routes=False, mfr=False)
staff_registrations = defaultdict(list)
users = [User.load(each) for each in STAFF_GUIDS]
for registration in Node.find(Q('is_registration', 'eq', True) & Q('is_public', 'eq', True)):
for user in users:
if registration in user.node__contributed:
staff_registrations[user._id].append(registration)
for uid in staff_registrations:
user = User.load(uid)
user_regs = staff_registrations[uid]
logger.info('{} ({}) on {} Public Registrations:'.format(
user.fullname,
user._id,
len(user_regs))
)
for registration in user_regs:
logger.info('\t{} ({}): {}'.format(registration.title,
registration._id,
registration.absolute_url)
)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to get public registrations for staff members
[skip ci]<commit_after>
|
# -*- coding: utf-8 -*-
"""Get public registrations for staff members.
python -m scripts.staff_public_regs
"""
from collections import defaultdict
import logging
from modularodm import Q
from website.models import Node, User
from website.app import init_app
logger = logging.getLogger('staff_public_regs')
STAFF_GUIDS = [
'jk5cv', # Jeff
'cdi38', # Brian
'edb8y', # Johanna
'hsey5', # Courtney
'5hdme', # Melissa
]
def main():
init_app(set_backends=True, routes=False, mfr=False)
staff_registrations = defaultdict(list)
users = [User.load(each) for each in STAFF_GUIDS]
for registration in Node.find(Q('is_registration', 'eq', True) & Q('is_public', 'eq', True)):
for user in users:
if registration in user.node__contributed:
staff_registrations[user._id].append(registration)
for uid in staff_registrations:
user = User.load(uid)
user_regs = staff_registrations[uid]
logger.info('{} ({}) on {} Public Registrations:'.format(
user.fullname,
user._id,
len(user_regs))
)
for registration in user_regs:
logger.info('\t{} ({}): {}'.format(registration.title,
registration._id,
registration.absolute_url)
)
if __name__ == '__main__':
main()
|
Add script to get public registrations for staff members
[skip ci]# -*- coding: utf-8 -*-
"""Get public registrations for staff members.
python -m scripts.staff_public_regs
"""
from collections import defaultdict
import logging
from modularodm import Q
from website.models import Node, User
from website.app import init_app
logger = logging.getLogger('staff_public_regs')
STAFF_GUIDS = [
'jk5cv', # Jeff
'cdi38', # Brian
'edb8y', # Johanna
'hsey5', # Courtney
'5hdme', # Melissa
]
def main():
init_app(set_backends=True, routes=False, mfr=False)
staff_registrations = defaultdict(list)
users = [User.load(each) for each in STAFF_GUIDS]
for registration in Node.find(Q('is_registration', 'eq', True) & Q('is_public', 'eq', True)):
for user in users:
if registration in user.node__contributed:
staff_registrations[user._id].append(registration)
for uid in staff_registrations:
user = User.load(uid)
user_regs = staff_registrations[uid]
logger.info('{} ({}) on {} Public Registrations:'.format(
user.fullname,
user._id,
len(user_regs))
)
for registration in user_regs:
logger.info('\t{} ({}): {}'.format(registration.title,
registration._id,
registration.absolute_url)
)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to get public registrations for staff members
[skip ci]<commit_after># -*- coding: utf-8 -*-
"""Get public registrations for staff members.
python -m scripts.staff_public_regs
"""
from collections import defaultdict
import logging
from modularodm import Q
from website.models import Node, User
from website.app import init_app
logger = logging.getLogger('staff_public_regs')
STAFF_GUIDS = [
'jk5cv', # Jeff
'cdi38', # Brian
'edb8y', # Johanna
'hsey5', # Courtney
'5hdme', # Melissa
]
def main():
init_app(set_backends=True, routes=False, mfr=False)
staff_registrations = defaultdict(list)
users = [User.load(each) for each in STAFF_GUIDS]
for registration in Node.find(Q('is_registration', 'eq', True) & Q('is_public', 'eq', True)):
for user in users:
if registration in user.node__contributed:
staff_registrations[user._id].append(registration)
for uid in staff_registrations:
user = User.load(uid)
user_regs = staff_registrations[uid]
logger.info('{} ({}) on {} Public Registrations:'.format(
user.fullname,
user._id,
len(user_regs))
)
for registration in user_regs:
logger.info('\t{} ({}): {}'.format(registration.title,
registration._id,
registration.absolute_url)
)
if __name__ == '__main__':
main()
|
|
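The core of the script above is a group-by-contributor pass over registrations; a minimal, standalone sketch of that pattern with made-up ids:

# Bucket each registration under every staff GUID that contributed to it,
# then report per-user counts - the same defaultdict(list) idiom as above.
from collections import defaultdict

contributions = {
    'reg-1': ['jk5cv', 'cdi38'],
    'reg-2': ['5hdme'],
    'reg-3': ['jk5cv'],
}

staff_registrations = defaultdict(list)
for reg, contributors in contributions.items():
    for uid in contributors:
        staff_registrations[uid].append(reg)

for uid, regs in sorted(staff_registrations.items()):
    print('{} -> {} public registration(s)'.format(uid, len(regs)))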
f9bdf777a13404ba25e0e8cdf99a3554320529c9
|
tools/telemetry/telemetry/core/backends/chrome/inspector_memory_unittest.py
|
tools/telemetry/telemetry/core/backends/chrome/inspector_memory_unittest.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1)
self.assertEqual(counts['node_count'], 14)
self.assertEqual(counts['event_listener_count'], 2)
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
# Document_count > 1 indicates that WebCore::Document loaded in Chrome
# is leaking! The baseline should exactly match the numbers on:
# unittest_data/dom_counter_sample.html
# Please contact kouhei@, hajimehoshi@ when rebaselining.
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1,
'Document leak is detected! '+
'The previous document is likely retained unexpectedly.')
self.assertEqual(counts['node_count'], 14,
'Node leak is detected!')
self.assertEqual(counts['event_listener_count'], 2,
'EventListener leak is detected!')
|
Add warnings to inspector DOM count unittest baselines.
|
Add warnings to inspector DOM count unittest baselines.
The unit test failure indicates a serious Document leak, where all
WebCore::Document loaded in Chrome is leaking.
This CL adds warning comments to the baseline to avoid regressions.
BUG=392121
NOTRY=true
Review URL: https://codereview.chromium.org/393123003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@284653 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
jaruba/chromium.src,Chilledheart/chromium,dednal/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,ltilve/chromium,markYoungH/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,ltilve/chromium,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,ltilve/chromium,chuan9/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,littlstar/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,littlstar/chromium.src,M4sse/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,Jonekee/chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,markYoungH/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,ltilve/chromium,littlstar/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,M4sse/chromium.src,Chilledheart/chromium,TheTypoM
aster/chromium-crosswalk,ondra-novak/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,jaruba/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,jaruba/chromium.src,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,ltilve/chromium,ltilve/chromium,jaruba/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,dednal/chromium.src,markYoungH/chromium.src,Just-D/chromium-1,dednal/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,ltilve/chromium,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,dednal/chromium.src,Just-D/chromium-1,Jonekee/chromium.src
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1)
self.assertEqual(counts['node_count'], 14)
self.assertEqual(counts['event_listener_count'], 2)
Add warnings to inspector DOM count unittest baselines.
The unit test failure indicates a serious Document leak, where all
WebCore::Document loaded in Chrome is leaking.
This CL adds warning comments to the baseline to avoid regressions.
BUG=392121
NOTRY=true
Review URL: https://codereview.chromium.org/393123003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@284653 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
# Document_count > 1 indicates that WebCore::Document loaded in Chrome
# is leaking! The baseline should exactly match the numbers on:
# unittest_data/dom_counter_sample.html
# Please contact kouhei@, hajimehoshi@ when rebaselining.
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1,
'Document leak is detected! '+
'The previous document is likely retained unexpectedly.')
self.assertEqual(counts['node_count'], 14,
'Node leak is detected!')
self.assertEqual(counts['event_listener_count'], 2,
'EventListener leak is detected!')
|
<commit_before># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1)
self.assertEqual(counts['node_count'], 14)
self.assertEqual(counts['event_listener_count'], 2)
<commit_msg>Add warnings to inspector DOM count unittest baselines.
The unit test failure indicates a serious Document leak, where all
WebCore::Document loaded in Chrome is leaking.
This CL adds warning comments to the baseline to avoid regressions.
BUG=392121
NOTRY=true
Review URL: https://codereview.chromium.org/393123003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@284653 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
# Document_count > 1 indicates that WebCore::Document loaded in Chrome
# is leaking! The baseline should exactly match the numbers on:
# unittest_data/dom_counter_sample.html
# Please contact kouhei@, hajimehoshi@ when rebaselining.
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1,
'Document leak is detected! '+
'The previous document is likely retained unexpectedly.')
self.assertEqual(counts['node_count'], 14,
'Node leak is detected!')
self.assertEqual(counts['event_listener_count'], 2,
'EventListener leak is detected!')
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1)
self.assertEqual(counts['node_count'], 14)
self.assertEqual(counts['event_listener_count'], 2)
Add warnings to inspector DOM count unittest baselines.
The unit test failure indicates a serious Document leak, where all
WebCore::Document loaded in Chrome is leaking.
This CL adds warning comments to the baseline to avoid regressions.
BUG=392121
NOTRY=true
Review URL: https://codereview.chromium.org/393123003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@284653 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
# Document_count > 1 indicates that WebCore::Document loaded in Chrome
# is leaking! The baseline should exactly match the numbers on:
# unittest_data/dom_counter_sample.html
# Please contact kouhei@, hajimehoshi@ when rebaselining.
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1,
'Document leak is detected! '+
'The previous document is likely retained unexpectedly.')
self.assertEqual(counts['node_count'], 14,
'Node leak is detected!')
self.assertEqual(counts['event_listener_count'], 2,
'EventListener leak is detected!')
|
<commit_before># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1)
self.assertEqual(counts['node_count'], 14)
self.assertEqual(counts['event_listener_count'], 2)
<commit_msg>Add warnings to inspector DOM count unittest baselines.
The unit test failure indicates a serious Document leak, where all
WebCore::Document loaded in Chrome is leaking.
This CL adds warning comments to the baseline to avoid regressions.
BUG=392121
NOTRY=true
Review URL: https://codereview.chromium.org/393123003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@284653 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
# Document_count > 1 indicates that WebCore::Document loaded in Chrome
# is leaking! The baseline should exactly match the numbers on:
# unittest_data/dom_counter_sample.html
# Please contact kouhei@, hajimehoshi@ when rebaselining.
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1,
'Document leak is detected! '+
'The previous document is likely retained unexpectedly.')
self.assertEqual(counts['node_count'], 14,
'Node leak is detected!')
self.assertEqual(counts['event_listener_count'], 2,
'EventListener leak is detected!')
|
46be255fd0cfaeb2352f2f49b4ec5996a804768d
|
test/unit/handler/test_base.py
|
test/unit/handler/test_base.py
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from mock import Mock
from bark.log import Log
from bark.handler.base import Handler
from bark.formatter.base import Formatter
class Concrete(Handler):
'''Concrete subclass of abstract base for testing.'''
def __init__(self, *args, **kw):
'''Initialise handler.'''
super(Concrete, self).__init__(*args, **kw)
self.data = []
def output(self, data):
'''Output formatted *data*.'''
self.data.append(data)
class Field(Formatter):
'''Format log into string of fields.'''
def format(self, log):
'''Return string of log fields.'''
data = []
for key, value in sorted(log.items()):
data.append('{0}={1}'.format(key, value))
return ':'.join(data)
def test_handle():
'''Test handle method.'''
handler = Concrete()
log = Log(message='A message')
handler.handle(log)
assert handler.data == [log]
def test_filterer():
'''Test filterer prevents output of log.'''
deny_all = Mock()
deny_all.filter = Mock(return_value=True)
handler = Concrete(filterer=deny_all)
log = Log(message='A message')
handler.handle(log)
assert handler.data == []
def test_formatter():
'''Test formatting of data before output.'''
handler = Concrete(formatter=Field())
log = Log(message='A message')
handler.handle(log)
assert handler.data == ['message=A message']
|
Add unit test for base Handler.
|
Add unit test for base Handler.
|
Python
|
apache-2.0
|
4degrees/sawmill,4degrees/mill
|
Add unit test for base Handler.
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from mock import Mock
from bark.log import Log
from bark.handler.base import Handler
from bark.formatter.base import Formatter
class Concrete(Handler):
'''Concrete subclass of abstract base for testing.'''
def __init__(self, *args, **kw):
'''Initialise handler.'''
super(Concrete, self).__init__(*args, **kw)
self.data = []
def output(self, data):
'''Output formatted *data*.'''
self.data.append(data)
class Field(Formatter):
'''Format log into string of fields.'''
def format(self, log):
'''Return string of log fields.'''
data = []
for key, value in sorted(log.items()):
data.append('{0}={1}'.format(key, value))
return ':'.join(data)
def test_handle():
'''Test handle method.'''
handler = Concrete()
log = Log(message='A message')
handler.handle(log)
assert handler.data == [log]
def test_filterer():
'''Test filterer prevents output of log.'''
deny_all = Mock()
deny_all.filter = Mock(return_value=True)
handler = Concrete(filterer=deny_all)
log = Log(message='A message')
handler.handle(log)
assert handler.data == []
def test_formatter():
'''Test formatting of data before output.'''
handler = Concrete(formatter=Field())
log = Log(message='A message')
handler.handle(log)
assert handler.data == ['message=A message']
|
<commit_before><commit_msg>Add unit test for base Handler.<commit_after>
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from mock import Mock
from bark.log import Log
from bark.handler.base import Handler
from bark.formatter.base import Formatter
class Concrete(Handler):
'''Concrete subclass of abstract base for testing.'''
def __init__(self, *args, **kw):
'''Initialise handler.'''
super(Concrete, self).__init__(*args, **kw)
self.data = []
def output(self, data):
'''Output formatted *data*.'''
self.data.append(data)
class Field(Formatter):
'''Format log into string of fields.'''
def format(self, log):
'''Return string of log fields.'''
data = []
for key, value in sorted(log.items()):
data.append('{0}={1}'.format(key, value))
return ':'.join(data)
def test_handle():
'''Test handle method.'''
handler = Concrete()
log = Log(message='A message')
handler.handle(log)
assert handler.data == [log]
def test_filterer():
'''Test filterer prevents output of log.'''
deny_all = Mock()
deny_all.filter = Mock(return_value=True)
handler = Concrete(filterer=deny_all)
log = Log(message='A message')
handler.handle(log)
assert handler.data == []
def test_formatter():
'''Test formatting of data before output.'''
handler = Concrete(formatter=Field())
log = Log(message='A message')
handler.handle(log)
assert handler.data == ['message=A message']
|
Add unit test for base Handler.# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from mock import Mock
from bark.log import Log
from bark.handler.base import Handler
from bark.formatter.base import Formatter
class Concrete(Handler):
'''Concrete subclass of abstract base for testing.'''
def __init__(self, *args, **kw):
'''Initialise handler.'''
super(Concrete, self).__init__(*args, **kw)
self.data = []
def output(self, data):
'''Output formatted *data*.'''
self.data.append(data)
class Field(Formatter):
'''Format log into string of fields.'''
def format(self, log):
'''Return string of log fields.'''
data = []
for key, value in sorted(log.items()):
data.append('{0}={1}'.format(key, value))
return ':'.join(data)
def test_handle():
'''Test handle method.'''
handler = Concrete()
log = Log(message='A message')
handler.handle(log)
assert handler.data == [log]
def test_filterer():
'''Test filterer prevents output of log.'''
deny_all = Mock()
deny_all.filter = Mock(return_value=True)
handler = Concrete(filterer=deny_all)
log = Log(message='A message')
handler.handle(log)
assert handler.data == []
def test_formatter():
'''Test formatting of data before output.'''
handler = Concrete(formatter=Field())
log = Log(message='A message')
handler.handle(log)
assert handler.data == ['message=A message']
|
<commit_before><commit_msg>Add unit test for base Handler.<commit_after># :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from mock import Mock
from bark.log import Log
from bark.handler.base import Handler
from bark.formatter.base import Formatter
class Concrete(Handler):
'''Concrete subclass of abstract base for testing.'''
def __init__(self, *args, **kw):
'''Initialise handler.'''
super(Concrete, self).__init__(*args, **kw)
self.data = []
def output(self, data):
'''Output formatted *data*.'''
self.data.append(data)
class Field(Formatter):
'''Format log into string of fields.'''
def format(self, log):
'''Return string of log fields.'''
data = []
for key, value in sorted(log.items()):
data.append('{0}={1}'.format(key, value))
return ':'.join(data)
def test_handle():
'''Test handle method.'''
handler = Concrete()
log = Log(message='A message')
handler.handle(log)
assert handler.data == [log]
def test_filterer():
'''Test filterer prevents output of log.'''
deny_all = Mock()
deny_all.filter = Mock(return_value=True)
handler = Concrete(filterer=deny_all)
log = Log(message='A message')
handler.handle(log)
assert handler.data == []
def test_formatter():
'''Test formatting of data before output.'''
handler = Concrete(formatter=Field())
log = Log(message='A message')
handler.handle(log)
assert handler.data == ['message=A message']
|
|
dcc5065c7cc4cc167affcbf906eaf81e73fa6d3e
|
py/set-mismatch.py
|
py/set-mismatch.py
|
class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, n in enumerate(nums, 1):
while i != n and nums[n - 1] != n:
nums[i - 1], nums[n - 1] = nums[n - 1], nums[i - 1]
n = nums[i - 1]
for i, n in enumerate(nums, 1):
if i != n:
return [n, i]
|
Add py solution for 645. Set Mismatch
|
Add py solution for 645. Set Mismatch
645. Set Mismatch: https://leetcode.com/problems/set-mismatch/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 645. Set Mismatch
645. Set Mismatch: https://leetcode.com/problems/set-mismatch/
|
class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, n in enumerate(nums, 1):
while i != n and nums[n - 1] != n:
nums[i - 1], nums[n - 1] = nums[n - 1], nums[i - 1]
n = nums[i - 1]
for i, n in enumerate(nums, 1):
if i != n:
return [n, i]
|
<commit_before><commit_msg>Add py solution for 645. Set Mismatch
645. Set Mismatch: https://leetcode.com/problems/set-mismatch/<commit_after>
|
class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, n in enumerate(nums, 1):
while i != n and nums[n - 1] != n:
nums[i - 1], nums[n - 1] = nums[n - 1], nums[i - 1]
n = nums[i - 1]
for i, n in enumerate(nums, 1):
if i != n:
return [n, i]
|
Add py solution for 645. Set Mismatch
645. Set Mismatch: https://leetcode.com/problems/set-mismatch/class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, n in enumerate(nums, 1):
while i != n and nums[n - 1] != n:
nums[i - 1], nums[n - 1] = nums[n - 1], nums[i - 1]
n = nums[i - 1]
for i, n in enumerate(nums, 1):
if i != n:
return [n, i]
|
<commit_before><commit_msg>Add py solution for 645. Set Mismatch
645. Set Mismatch: https://leetcode.com/problems/set-mismatch/<commit_after>class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, n in enumerate(nums, 1):
while i != n and nums[n - 1] != n:
nums[i - 1], nums[n - 1] = nums[n - 1], nums[i - 1]
n = nums[i - 1]
for i, n in enumerate(nums, 1):
if i != n:
return [n, i]
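As a quick sanity check of the cyclic-sort solution above (sample input made up; note that findErrorNums mutates its argument in place):
nums = [1, 2, 2, 4]
print(Solution().findErrorNums(nums))  # expected: [2, 3] -- 2 is duplicated, 3 is missing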
|
|
1b4bf232b9fd348a94b8bc4e9c851ed5b6d8e801
|
tests/config/test_room_directory.py
|
tests/config/test_room_directory.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from synapse.config.room_directory import RoomDirectoryConfig
from tests import unittest
class RoomDirectoryConfigTestCase(unittest.TestCase):
def test_alias_creation_acl(self):
config = yaml.load("""
alias_creation_rules:
- user_id: "*bob*"
alias: "*"
action: "denied"
- user_id: "*"
alias: "#unofficial_*"
action: "allowed"
- user_id: "@foo*:example.com"
alias: "*"
action: "allowed"
- user_id: "@gah:example.com"
alias: "#goo:example.com"
action: "allowed"
""")
rd_config = RoomDirectoryConfig()
rd_config.read_config(config)
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@bob:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#unofficial_st:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@foobar:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@gah:example.com",
alias="#goo:example.com",
))
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#test:example.com",
))
|
Add tests for config generation
|
Add tests for config generation
|
Python
|
apache-2.0
|
matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse
|
Add tests for config generation
|
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from synapse.config.room_directory import RoomDirectoryConfig
from tests import unittest
class RoomDirectoryConfigTestCase(unittest.TestCase):
def test_alias_creation_acl(self):
config = yaml.load("""
alias_creation_rules:
- user_id: "*bob*"
alias: "*"
action: "denied"
- user_id: "*"
alias: "#unofficial_*"
action: "allowed"
- user_id: "@foo*:example.com"
alias: "*"
action: "allowed"
- user_id: "@gah:example.com"
alias: "#goo:example.com"
action: "allowed"
""")
rd_config = RoomDirectoryConfig()
rd_config.read_config(config)
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@bob:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#unofficial_st:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@foobar:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@gah:example.com",
alias="#goo:example.com",
))
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#test:example.com",
))
|
<commit_before><commit_msg>Add tests for config generation<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from synapse.config.room_directory import RoomDirectoryConfig
from tests import unittest
class RoomDirectoryConfigTestCase(unittest.TestCase):
def test_alias_creation_acl(self):
config = yaml.load("""
alias_creation_rules:
- user_id: "*bob*"
alias: "*"
action: "denied"
- user_id: "*"
alias: "#unofficial_*"
action: "allowed"
- user_id: "@foo*:example.com"
alias: "*"
action: "allowed"
- user_id: "@gah:example.com"
alias: "#goo:example.com"
action: "allowed"
""")
rd_config = RoomDirectoryConfig()
rd_config.read_config(config)
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@bob:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#unofficial_st:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@foobar:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@gah:example.com",
alias="#goo:example.com",
))
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#test:example.com",
))
|
Add tests for config generation# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from synapse.config.room_directory import RoomDirectoryConfig
from tests import unittest
class RoomDirectoryConfigTestCase(unittest.TestCase):
def test_alias_creation_acl(self):
config = yaml.load("""
alias_creation_rules:
- user_id: "*bob*"
alias: "*"
action: "denied"
- user_id: "*"
alias: "#unofficial_*"
action: "allowed"
- user_id: "@foo*:example.com"
alias: "*"
action: "allowed"
- user_id: "@gah:example.com"
alias: "#goo:example.com"
action: "allowed"
""")
rd_config = RoomDirectoryConfig()
rd_config.read_config(config)
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@bob:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#unofficial_st:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@foobar:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@gah:example.com",
alias="#goo:example.com",
))
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#test:example.com",
))
|
<commit_before><commit_msg>Add tests for config generation<commit_after># -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from synapse.config.room_directory import RoomDirectoryConfig
from tests import unittest
class RoomDirectoryConfigTestCase(unittest.TestCase):
def test_alias_creation_acl(self):
config = yaml.load("""
alias_creation_rules:
- user_id: "*bob*"
alias: "*"
action: "denied"
- user_id: "*"
alias: "#unofficial_*"
action: "allowed"
- user_id: "@foo*:example.com"
alias: "*"
action: "allowed"
- user_id: "@gah:example.com"
alias: "#goo:example.com"
action: "allowed"
""")
rd_config = RoomDirectoryConfig()
rd_config.read_config(config)
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@bob:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#unofficial_st:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@foobar:example.com",
alias="#test:example.com",
))
self.assertTrue(rd_config.is_alias_creation_allowed(
user_id="@gah:example.com",
alias="#goo:example.com",
))
self.assertFalse(rd_config.is_alias_creation_allowed(
user_id="@test:example.com",
alias="#test:example.com",
))
|
|
fefcc9ab57b5dc818690c4febc4250fffb0f9543
|
subs/modify_acl.py
|
subs/modify_acl.py
|
# Copyright 2016 Netfishers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# typical command line to launch this procedure would be :
# cli.py -f hosts/routers -o profiles/myloginandpassword.enc -s subs.modify_acl add_stats_per_entry
#
# The expected result is :
# - connect to each router of "routers"
# - check if any ACL is configured
# - modify it according to the method
#
#we can perform any conditional action inside this block of code
def add_stats_per_entry(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("statistics per-entry")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def add_deny_statement(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("deny ip any any log")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def main():
print "\n\n>>>>> this module is used as a parameter of main program, it does nothing by itself <<<<<<<<<\n\n"
if __name__ == "__main__":
main()
|
Add a new sub example regarding custom ACL modification
|
Add a new sub example regarding custom ACL modification
|
Python
|
apache-2.0
|
AlainMoretti/cli-wrapper
|
Add a new sub example regarding custom ACL modification
|
# Copyright 2016 Netfishers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# typical command line to launch this procedure would be :
# cli.py -f hosts/routers -o profiles/myloginandpassword.enc -s subs.modify_acl add_stats_per_entry
#
# The expected result is :
# - connect to each router of "routers"
# - check if any ACL is configured
# - modify it according to the method
#
#we can perform any conditional action inside this block of code
def add_stats_per_entry(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("statistics per-entry")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def add_deny_statement(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("deny ip any any log")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def main():
print "\n\n>>>>> this module is used as a parameter of main program, it does nothing by itself <<<<<<<<<\n\n"
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a new sub example regarding custom ACL modification<commit_after>
|
# Copyright 2016 Netfishers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# typical command line to launch this procedure would be :
# cli.py -f hosts/routers -o profiles/myloginandpassword.enc -s subs.modify_acl add_stats_per_entry
#
# The expected result is :
# - connect to each router of "routers"
# - check if any ACL is configured
# - modify it according to the method
#
#we can perform any conditional action inside this block of code
def add_stats_per_entry(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("statistics per-entry")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def add_deny_statement(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("deny ip any any log")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def main():
print "\n\n>>>>> this module is used as a parameter of main program, it does nothing by itself <<<<<<<<<\n\n"
if __name__ == "__main__":
main()
|
Add a new sub example regarding custom ACL modification# Copyright 2016 Netfishers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# typical command line to launch this procedure would be :
# cli.py -f hosts/routers -o profiles/myloginandpassword.enc -s subs.modify_acl add_stats_per_entry
#
# The expected result is :
# - connect to each router of "routers"
# - check if any ACL is configured
# - modify it according to the method
#
#we can perform any conditional action inside this block of code
def add_stats_per_entry(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("statistics per-entry")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def add_deny_statement(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("deny ip any any log")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def main():
print "\n\n>>>>> this module is used as a parameter of main program, it does nothing by itself <<<<<<<<<\n\n"
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a new sub example regarding custom ACL modification<commit_after># Copyright 2016 Netfishers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# typical command line to launch this procedure would be :
# cli.py -f hosts/routers -o profiles/myloginandpassword.enc -s subs.modify_acl add_stats_per_entry
#
# The expected result is :
# - connect to each router of "routers"
# - check if any ACL is configured
# - modify it according to the method
#
#we can perform any conditional action inside this block of code
def add_stats_per_entry(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("statistics per-entry")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def add_deny_statement(args,c):
#check if the router has some ACL with name containing 'FROM'
c.sendline('show ip access-list | inc FROM')
c.expect(args.prompt)
#grab and parse the output
out = c.before
outlist = out.split("\n")
#go to configuration mode
c.sendline('configure')
c.expect(args.prompt)
for line in outlist:
if line[0:14] == 'IP access list':
#now iterate over any ACL and apply the change
c.sendline("ip access-list "+line[15:])
c.expect(args.prompt)
c.sendline("deny ip any any log")
c.expect(args.prompt)
c.sendline('end')
c.expect(args.prompt)
def main():
print "\n\n>>>>> this module is used as a parameter of main program, it does nothing by itself <<<<<<<<<\n\n"
if __name__ == "__main__":
main()
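By analogy with the example command line in the comments above, the second procedure defined in this file would presumably be launched as follows (untested sketch):
# cli.py -f hosts/routers -o profiles/myloginandpassword.enc -s subs.modify_acl add_deny_statement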
|
|
8cc020949f1d7eb9c66121a7d3a762738cb44c2c
|
src/station_map.py
|
src/station_map.py
|
station_map = {
'12th': '12th St. Oakland City Center',
'16th': '16th St. Mission (SF)',
'19th': '19th St. Oakland',
'24th': '24th St. Mission (SF)',
'ashb': 'Ashby (Berkeley)',
'balb': 'Balboa Park (SF)',
'bayf': 'Bay Fair (San Leandro)',
'cast': 'Castro Valley',
'civc': 'Civic Center (SF)',
'cols': 'Coliseum/Oakland Airport',
'colm': 'Colma',
'conc': 'Concord',
'daly': 'Daly City',
'dbrk': 'Downtown Berkeley',
'dubl': 'Dublin/Pleasanton',
'deln': 'El Cerrito del Norte',
'plza': 'El Cerrito Plaza',
'embr': 'Embarcadero (SF)',
'frmt': 'Fremont',
'ftvl': 'Fruitvale (Oakland)',
'glen': 'Glen Park (SF)',
'hayw': 'Hayward',
'lafy': 'Lafayette',
'lake': 'Lake Merritt (Oakland)',
'mcar': 'MacArthur (Oakland)',
'mlbr': 'Millbrae',
'mont': 'Montgomery St. (SF)',
'nbrk': 'North Berkeley',
'ncon': 'North Concord/Martinez',
'orin': 'Orinda',
'pitt': 'Pittsburg/Bay Point',
'phil': 'Pleasant Hill',
'powl': 'Powell St. (SF)',
'rich': 'Richmond',
'rock': 'Rockridge (Oakland)',
'sbrn': 'San Bruno',
'sfia': 'San Francisco Int\'l Airport',
'sanl': 'San Leandro',
'shay': 'South Hayward',
'ssan': 'South San Francisco',
'ucty': 'Union City',
'wcrk': 'Walnut Creek',
'wdub': 'West Dublin',
'woak': 'West Oakland'
}
|
Add dictionary mapping abbreviations to station names
|
Add dictionary mapping abbreviations to station names
|
Python
|
mit
|
ganemone/SublimeBart,ganemone/SublimeBart,ganemone/SublimeBart,ganemone/SublimeBart
|
Add dictionary mapping abbreviations to station names
|
station_map = {
'12th': '12th St. Oakland City Center',
'16th': '16th St. Mission (SF)',
'19th': '19th St. Oakland',
'24th': '24th St. Mission (SF)',
'ashb': 'Ashby (Berkeley)',
'balb': 'Balboa Park (SF)',
'bayf': 'Bay Fair (San Leandro)',
'cast': 'Castro Valley',
'civc': 'Civic Center (SF)',
'cols': 'Coliseum/Oakland Airport',
'colm': 'Colma',
'conc': 'Concord',
'daly': 'Daly City',
'dbrk': 'Downtown Berkeley',
'dubl': 'Dublin/Pleasanton',
'deln': 'El Cerrito del Norte',
'plza': 'El Cerrito Plaza',
'embr': 'Embarcadero (SF)',
'frmt': 'Fremont',
'ftvl': 'Fruitvale (Oakland)',
'glen': 'Glen Park (SF)',
'hayw': 'Hayward',
'lafy': 'Lafayette',
'lake': 'Lake Merritt (Oakland)',
'mcar': 'MacArthur (Oakland)',
'mlbr': 'Millbrae',
'mont': 'Montgomery St. (SF)',
'nbrk': 'North Berkeley',
'ncon': 'North Concord/Martinez',
'orin': 'Orinda',
'pitt': 'Pittsburg/Bay Point',
'phil': 'Pleasant Hill',
'powl': 'Powell St. (SF)',
'rich': 'Richmond',
'rock': 'Rockridge (Oakland)',
'sbrn': 'San Bruno',
'sfia': 'San Francisco Int\'l Airport',
'sanl': 'San Leandro',
'shay': 'South Hayward',
'ssan': 'South San Francisco',
'ucty': 'Union City',
'wcrk': 'Walnut Creek',
'wdub': 'West Dublin',
'woak': 'West Oakland'
}
|
<commit_before><commit_msg>Add dictionary mapping abbreviations to station names<commit_after>
|
station_map = {
'12th': '12th St. Oakland City Center',
'16th': '16th St. Mission (SF)',
'19th': '19th St. Oakland',
'24th': '24th St. Mission (SF)',
'ashb': 'Ashby (Berkeley)',
'balb': 'Balboa Park (SF)',
'bayf': 'Bay Fair (San Leandro)',
'cast': 'Castro Valley',
'civc': 'Civic Center (SF)',
'cols': 'Coliseum/Oakland Airport',
'colm': 'Colma',
'conc': 'Concord',
'daly': 'Daly City',
'dbrk': 'Downtown Berkeley',
'dubl': 'Dublin/Pleasanton',
'deln': 'El Cerrito del Norte',
'plza': 'El Cerrito Plaza',
'embr': 'Embarcadero (SF)',
'frmt': 'Fremont',
'ftvl': 'Fruitvale (Oakland)',
'glen': 'Glen Park (SF)',
'hayw': 'Hayward',
'lafy': 'Lafayette',
'lake': 'Lake Merritt (Oakland)',
'mcar': 'MacArthur (Oakland)',
'mlbr': 'Millbrae',
'mont': 'Montgomery St. (SF)',
'nbrk': 'North Berkeley',
'ncon': 'North Concord/Martinez',
'orin': 'Orinda',
'pitt': 'Pittsburg/Bay Point',
'phil': 'Pleasant Hill',
'powl': 'Powell St. (SF)',
'rich': 'Richmond',
'rock': 'Rockridge (Oakland)',
'sbrn': 'San Bruno',
'sfia': 'San Francisco Int\'l Airport',
'sanl': 'San Leandro',
'shay': 'South Hayward',
'ssan': 'South San Francisco',
'ucty': 'Union City',
'wcrk': 'Walnut Creek',
'wdub': 'West Dublin',
'woak': 'West Oakland'
}
|
Add dictionary mapping abbreviations to station namesstation_map = {
'12th': '12th St. Oakland City Center',
'16th': '16th St. Mission (SF)',
'19th': '19th St. Oakland',
'24th': '24th St. Mission (SF)',
'ashb': 'Ashby (Berkeley)',
'balb': 'Balboa Park (SF)',
'bayf': 'Bay Fair (San Leandro)',
'cast': 'Castro Valley',
'civc': 'Civic Center (SF)',
'cols': 'Coliseum/Oakland Airport',
'colm': 'Colma',
'conc': 'Concord',
'daly': 'Daly City',
'dbrk': 'Downtown Berkeley',
'dubl': 'Dublin/Pleasanton',
'deln': 'El Cerrito del Norte',
'plza': 'El Cerrito Plaza',
'embr': 'Embarcadero (SF)',
'frmt': 'Fremont',
'ftvl': 'Fruitvale (Oakland)',
'glen': 'Glen Park (SF)',
'hayw': 'Hayward',
'lafy': 'Lafayette',
'lake': 'Lake Merritt (Oakland)',
'mcar': 'MacArthur (Oakland)',
'mlbr': 'Millbrae',
'mont': 'Montgomery St. (SF)',
'nbrk': 'North Berkeley',
'ncon': 'North Concord/Martinez',
'orin': 'Orinda',
'pitt': 'Pittsburg/Bay Point',
'phil': 'Pleasant Hill',
'powl': 'Powell St. (SF)',
'rich': 'Richmond',
'rock': 'Rockridge (Oakland)',
'sbrn': 'San Bruno',
'sfia': 'San Francisco Int\'l Airport',
'sanl': 'San Leandro',
'shay': 'South Hayward',
'ssan': 'South San Francisco',
'ucty': 'Union City',
'wcrk': 'Walnut Creek',
'wdub': 'West Dublin',
'woak': 'West Oakland'
}
|
<commit_before><commit_msg>Add dictionary mapping abbreviations to station names<commit_after>station_map = {
'12th': '12th St. Oakland City Center',
'16th': '16th St. Mission (SF)',
'19th': '19th St. Oakland',
'24th': '24th St. Mission (SF)',
'ashb': 'Ashby (Berkeley)',
'balb': 'Balboa Park (SF)',
'bayf': 'Bay Fair (San Leandro)',
'cast': 'Castro Valley',
'civc': 'Civic Center (SF)',
'cols': 'Coliseum/Oakland Airport',
'colm': 'Colma',
'conc': 'Concord',
'daly': 'Daly City',
'dbrk': 'Downtown Berkeley',
'dubl': 'Dublin/Pleasanton',
'deln': 'El Cerrito del Norte',
'plza': 'El Cerrito Plaza',
'embr': 'Embarcadero (SF)',
'frmt': 'Fremont',
'ftvl': 'Fruitvale (Oakland)',
'glen': 'Glen Park (SF)',
'hayw': 'Hayward',
'lafy': 'Lafayette',
'lake': 'Lake Merritt (Oakland)',
'mcar': 'MacArthur (Oakland)',
'mlbr': 'Millbrae',
'mont': 'Montgomery St. (SF)',
'nbrk': 'North Berkeley',
'ncon': 'North Concord/Martinez',
'orin': 'Orinda',
'pitt': 'Pittsburg/Bay Point',
'phil': 'Pleasant Hill',
'powl': 'Powell St. (SF)',
'rich': 'Richmond',
'rock': 'Rockridge (Oakland)',
'sbrn': 'San Bruno',
'sfia': 'San Francisco Int\'l Airport',
'sanl': 'San Leandro',
'shay': 'South Hayward',
'ssan': 'South San Francisco',
'ucty': 'Union City',
'wcrk': 'Walnut Creek',
'wdub': 'West Dublin',
'woak': 'West Oakland'
}
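A minimal usage sketch for the mapping above (falling back to the raw abbreviation when it is unknown):
abbr = 'embr'
print(station_map.get(abbr, abbr))  # Embarcadero (SF)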
|
|
45dc85ded5a766191cd58d76a16470fc063d6e70
|
tests/test_httperror.py
|
tests/test_httperror.py
|
import unittest
from fleece import httperror
class HTTPErrorTests(unittest.TestCase):
"""Tests for :class:`fleece.httperror.HTTPError`."""
def test_error_msg_format(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404)
self.assertEqual('404: Not Found', str(err.exception))
def test_error_msg_format_custom_message(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404, message='Nothing Here')
self.assertEqual(
'404: Not Found - Nothing Here', str(err.exception)
)
|
Add error formatting tests for httperror
|
Add error formatting tests for httperror
|
Python
|
apache-2.0
|
racker/fleece,racker/fleece
|
Add error formatting tests for httperror
|
import unittest
from fleece import httperror
class HTTPErrorTests(unittest.TestCase):
"""Tests for :class:`fleece.httperror.HTTPError`."""
def test_error_msg_format(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404)
self.assertEqual('404: Not Found', str(err.exception))
def test_error_msg_format_custom_message(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404, message='Nothing Here')
self.assertEqual(
'404: Not Found - Nothing Here', str(err.exception)
)
|
<commit_before><commit_msg>Add error formatting tests for httperror<commit_after>
|
import unittest
from fleece import httperror
class HTTPErrorTests(unittest.TestCase):
"""Tests for :class:`fleece.httperror.HTTPError`."""
def test_error_msg_format(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404)
self.assertEqual('404: Not Found', str(err.exception))
def test_error_msg_format_custom_message(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404, message='Nothing Here')
self.assertEqual(
'404: Not Found - Nothing Here', str(err.exception)
)
|
Add error formatting tests for httperrorimport unittest
from fleece import httperror
class HTTPErrorTests(unittest.TestCase):
"""Tests for :class:`fleece.httperror.HTTPError`."""
def test_error_msg_format(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404)
self.assertEqual('404: Not Found', str(err.exception))
def test_error_msg_format_custom_message(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404, message='Nothing Here')
self.assertEqual(
'404: Not Found - Nothing Here', str(err.exception)
)
|
<commit_before><commit_msg>Add error formatting tests for httperror<commit_after>import unittest
from fleece import httperror
class HTTPErrorTests(unittest.TestCase):
"""Tests for :class:`fleece.httperror.HTTPError`."""
def test_error_msg_format(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404)
self.assertEqual('404: Not Found', str(err.exception))
def test_error_msg_format_custom_message(self):
with self.assertRaises(httperror.HTTPError) as err:
raise httperror.HTTPError(status=404, message='Nothing Here')
self.assertEqual(
'404: Not Found - Nothing Here', str(err.exception)
)
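A hypothetical application-side sketch built only on the constructor arguments exercised by the tests above (the lookup function and store are made up):
from fleece import httperror
def get_widget(widget_id, store):
    if widget_id not in store:
        raise httperror.HTTPError(status=404, message='Nothing Here')
    return store[widget_id]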
|
|
a627fa4c681bdd9de323750c3ab3f2cb0d5fca86
|
server/hoot/app.py
|
server/hoot/app.py
|
#!../env/bin/python
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/hoot/api/v1.0/', methods=['GET'])
def index():
return jsonify({'hello': 'Hello World!'})
if __name__ == '__main__':
app.run(debug=True)
|
Add basic infrastructure for rest API
|
Add basic infrastructure for rest API
|
Python
|
mit
|
CatalystOfNostalgia/hoot,CatalystOfNostalgia/hoot
|
Add basic infrastructure for rest API
|
#!../env/bin/python
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/hoot/api/v1.0/', methods=['GET'])
def index():
return jsonify({'hello': 'Hello World!'})
if __name__ == '__main__':
app.run(debug=True)
|
<commit_before><commit_msg>Add basic infrastructure for rest API<commit_after>
|
#!../env/bin/python
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/hoot/api/v1.0/', methods=['GET'])
def index():
return jsonify({'hello': 'Hello World!'})
if __name__ == '__main__':
app.run(debug=True)
|
Add basic infrastructure for rest API#!../env/bin/python
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/hoot/api/v1.0/', methods=['GET'])
def index():
return jsonify({'hello': 'Hello World!'})
if __name__ == '__main__':
app.run(debug=True)
|
<commit_before><commit_msg>Add basic infrastructure for rest API<commit_after>#!../env/bin/python
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/hoot/api/v1.0/', methods=['GET'])
def index():
return jsonify({'hello': 'Hello World!'})
if __name__ == '__main__':
app.run(debug=True)
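One way to exercise the endpoint without starting the server is Flask's built-in test client (sketch, not part of the original commit; assumes the module is importable as app):
from app import app
client = app.test_client()
response = client.get('/hoot/api/v1.0/')
print(response.data)  # '{"hello": "Hello World!"}' (exact formatting may vary)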
|
|
500df3f340d7782c759634529ae40ce56f7bec3e
|
plag.py
|
plag.py
|
import sys
from docx import Document
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Must specify file!")
        sys.exit(1)
    #open the docx (and docx only)
    document = Document(sys.argv[1])
#for each paragraph on the docx
for parag in document.paragraphs:
#extract the string
text = parag.text
#split at whitespace
splitted = text.split()
#send to google every 5~10 words and save the url of the first Y results (parallelism preferrable, bandwidth is not a big problem, the old http protocol is)
#count the ocurrences of each URL
#create a ratio based on the size of the document and the times an URL can appear
#if a given URL goes beyond that ratio, it's plagiarized
|
Add first file Only read a .docx until now
|
Add first file
Only read a .docx until now
|
Python
|
apache-2.0
|
Psidium/NTL
|
Add first file
Only read a .docx until now
|
import sys
from docx import Document
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Must specify file!")
        sys.exit(1)
    #open the docx (and docx only)
    document = Document(sys.argv[1])
#for each paragraph on the docx
for parag in document.paragraphs:
#extract the string
text = parag.text
#split at whitespace
splitted = text.split()
#send to google every 5~10 words and save the url of the first Y results (parallelism preferrable, bandwidth is not a big problem, the old http protocol is)
#count the ocurrences of each URL
#create a ratio based on the size of the document and the times an URL can appear
#if a given URL goes beyond that ratio, it's plagiarized
|
<commit_before><commit_msg>Add first file
Only read a .docx until now<commit_after>
|
import sys
from docx import Document
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Must specify file!")
        sys.exit(1)
    #open the docx (and docx only)
    document = Document(sys.argv[1])
#for each paragraph on the docx
for parag in document.paragraphs:
#extract the string
text = parag.text
#split at whitespace
splitted = text.split()
#send to google every 5~10 words and save the url of the first Y results (parallelism preferrable, bandwidth is not a big problem, the old http protocol is)
#count the ocurrences of each URL
#create a ratio based on the size of the document and the times an URL can appear
#if a given URL goes beyond that ratio, it's plagiarized
|
Add first file
Only read a .docx until nowimport sys
from docx import Document
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Must specify file!")
        sys.exit(1)
    #open the docx (and docx only)
    document = Document(sys.argv[1])
#for each paragraph on the docx
for parag in document.paragraphs:
#extract the string
text = parag.text
#split at whitespace
splitted = text.split()
#send to google every 5~10 words and save the url of the first Y results (parallelism preferrable, bandwidth is not a big problem, the old http protocol is)
#count the ocurrences of each URL
#create a ratio based on the size of the document and the times an URL can appear
#if a given URL goes beyond that ratio, it's plagiarized
|
<commit_before><commit_msg>Add first file
Only read a .docx until now<commit_after>import sys
from docx import Document
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Must specify file!")
        sys.exit(1)
    #open the docx (and docx only)
    document = Document(sys.argv[1])
#for each paragraph on the docx
for parag in document.paragraphs:
#extract the string
text = parag.text
#split at whitespace
splitted = text.split()
#send to google every 5~10 words and save the url of the first Y results (parallelism preferrable, bandwidth is not a big problem, the old http protocol is)
#count the ocurrences of each URL
#create a ratio based on the size of the document and the times an URL can appear
#if a given URL goes beyond that ratio, it's plagiarized
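A rough sketch of the chunking step described in the comments above (the chunk size of 8 words is an arbitrary pick from the 5~10 range; the sample sentence is made up):
words = "this sample sentence stands in for the splitted list built from one paragraph".split()
chunks = [" ".join(words[i:i + 8]) for i in range(0, len(words), 8)]
print(chunks)  # two chunks of up to 8 words each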
|
|
882de02df3131cf19eed5750428bcb79ce7f30c1
|
netprofile_access/migrations/f2d2359b923a_link_bindings_to_access_entities.py
|
netprofile_access/migrations/f2d2359b923a_link_bindings_to_access_entities.py
|
"""link bindings to access entities
Revision ID: f2d2359b923a
Revises: b32a4bf96447
Create Date: 2018-01-09 16:59:13.885801
"""
# revision identifiers, used by Alembic.
revision = 'f2d2359b923a'
down_revision = 'b32a4bf96447'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import FetchedValue
from netprofile.db import ddl as npd
from netprofile.db import fields as npf
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('netdev_bindings', sa.Column('aeid', npf.UInt32(), npd.Comment('Access entity ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('cvlanid', npf.UInt16(), npd.Comment('Customer (inner) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('svlanid', npf.UInt16(), npd.Comment('Service provider (outer) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.create_index('netdev_bindings_i_aeid', 'netdev_bindings', ['aeid'], unique=False)
op.create_index('netdev_bindings_i_qinq', 'netdev_bindings', ['svlanid', 'cvlanid'], unique=False)
op.create_foreign_key('netdev_bindings_fk_aeid', 'netdev_bindings', 'entities_access', ['aeid'], ['entityid'], onupdate='CASCADE', ondelete='CASCADE')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('netdev_bindings_fk_aeid', 'netdev_bindings', type_='foreignkey')
op.drop_index('netdev_bindings_i_qinq', table_name='netdev_bindings')
op.drop_index('netdev_bindings_i_aeid', table_name='netdev_bindings')
op.drop_column('netdev_bindings', 'svlanid')
op.drop_column('netdev_bindings', 'cvlanid')
op.drop_column('netdev_bindings', 'aeid')
# ### end Alembic commands ###
|
Add DB migration for netdev bindings
|
Add DB migration for netdev bindings
|
Python
|
agpl-3.0
|
unikmhz/npui,unikmhz/npui,unikmhz/npui,unikmhz/npui
|
Add DB migration for netdev bindings
|
"""link bindings to access entities
Revision ID: f2d2359b923a
Revises: b32a4bf96447
Create Date: 2018-01-09 16:59:13.885801
"""
# revision identifiers, used by Alembic.
revision = 'f2d2359b923a'
down_revision = 'b32a4bf96447'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import FetchedValue
from netprofile.db import ddl as npd
from netprofile.db import fields as npf
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('netdev_bindings', sa.Column('aeid', npf.UInt32(), npd.Comment('Access entity ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('cvlanid', npf.UInt16(), npd.Comment('Customer (inner) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('svlanid', npf.UInt16(), npd.Comment('Service provider (outer) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.create_index('netdev_bindings_i_aeid', 'netdev_bindings', ['aeid'], unique=False)
op.create_index('netdev_bindings_i_qinq', 'netdev_bindings', ['svlanid', 'cvlanid'], unique=False)
op.create_foreign_key('netdev_bindings_fk_aeid', 'netdev_bindings', 'entities_access', ['aeid'], ['entityid'], onupdate='CASCADE', ondelete='CASCADE')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('netdev_bindings_fk_aeid', 'netdev_bindings', type_='foreignkey')
op.drop_index('netdev_bindings_i_qinq', table_name='netdev_bindings')
op.drop_index('netdev_bindings_i_aeid', table_name='netdev_bindings')
op.drop_column('netdev_bindings', 'svlanid')
op.drop_column('netdev_bindings', 'cvlanid')
op.drop_column('netdev_bindings', 'aeid')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add DB migration for netdev bindings<commit_after>
|
"""link bindings to access entities
Revision ID: f2d2359b923a
Revises: b32a4bf96447
Create Date: 2018-01-09 16:59:13.885801
"""
# revision identifiers, used by Alembic.
revision = 'f2d2359b923a'
down_revision = 'b32a4bf96447'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import FetchedValue
from netprofile.db import ddl as npd
from netprofile.db import fields as npf
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('netdev_bindings', sa.Column('aeid', npf.UInt32(), npd.Comment('Access entity ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('cvlanid', npf.UInt16(), npd.Comment('Customer (inner) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('svlanid', npf.UInt16(), npd.Comment('Service provider (outer) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.create_index('netdev_bindings_i_aeid', 'netdev_bindings', ['aeid'], unique=False)
op.create_index('netdev_bindings_i_qinq', 'netdev_bindings', ['svlanid', 'cvlanid'], unique=False)
op.create_foreign_key('netdev_bindings_fk_aeid', 'netdev_bindings', 'entities_access', ['aeid'], ['entityid'], onupdate='CASCADE', ondelete='CASCADE')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('netdev_bindings_fk_aeid', 'netdev_bindings', type_='foreignkey')
op.drop_index('netdev_bindings_i_qinq', table_name='netdev_bindings')
op.drop_index('netdev_bindings_i_aeid', table_name='netdev_bindings')
op.drop_column('netdev_bindings', 'svlanid')
op.drop_column('netdev_bindings', 'cvlanid')
op.drop_column('netdev_bindings', 'aeid')
# ### end Alembic commands ###
|
Add DB migration for netdev bindings"""link bindings to access entities
Revision ID: f2d2359b923a
Revises: b32a4bf96447
Create Date: 2018-01-09 16:59:13.885801
"""
# revision identifiers, used by Alembic.
revision = 'f2d2359b923a'
down_revision = 'b32a4bf96447'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import FetchedValue
from netprofile.db import ddl as npd
from netprofile.db import fields as npf
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('netdev_bindings', sa.Column('aeid', npf.UInt32(), npd.Comment('Access entity ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('cvlanid', npf.UInt16(), npd.Comment('Customer (inner) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('svlanid', npf.UInt16(), npd.Comment('Service provider (outer) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.create_index('netdev_bindings_i_aeid', 'netdev_bindings', ['aeid'], unique=False)
op.create_index('netdev_bindings_i_qinq', 'netdev_bindings', ['svlanid', 'cvlanid'], unique=False)
op.create_foreign_key('netdev_bindings_fk_aeid', 'netdev_bindings', 'entities_access', ['aeid'], ['entityid'], onupdate='CASCADE', ondelete='CASCADE')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('netdev_bindings_fk_aeid', 'netdev_bindings', type_='foreignkey')
op.drop_index('netdev_bindings_i_qinq', table_name='netdev_bindings')
op.drop_index('netdev_bindings_i_aeid', table_name='netdev_bindings')
op.drop_column('netdev_bindings', 'svlanid')
op.drop_column('netdev_bindings', 'cvlanid')
op.drop_column('netdev_bindings', 'aeid')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add DB migration for netdev bindings<commit_after>"""link bindings to access entities
Revision ID: f2d2359b923a
Revises: b32a4bf96447
Create Date: 2018-01-09 16:59:13.885801
"""
# revision identifiers, used by Alembic.
revision = 'f2d2359b923a'
down_revision = 'b32a4bf96447'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import FetchedValue
from netprofile.db import ddl as npd
from netprofile.db import fields as npf
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('netdev_bindings', sa.Column('aeid', npf.UInt32(), npd.Comment('Access entity ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('cvlanid', npf.UInt16(), npd.Comment('Customer (inner) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.add_column('netdev_bindings', sa.Column('svlanid', npf.UInt16(), npd.Comment('Service provider (outer) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
op.create_index('netdev_bindings_i_aeid', 'netdev_bindings', ['aeid'], unique=False)
op.create_index('netdev_bindings_i_qinq', 'netdev_bindings', ['svlanid', 'cvlanid'], unique=False)
op.create_foreign_key('netdev_bindings_fk_aeid', 'netdev_bindings', 'entities_access', ['aeid'], ['entityid'], onupdate='CASCADE', ondelete='CASCADE')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('netdev_bindings_fk_aeid', 'netdev_bindings', type_='foreignkey')
op.drop_index('netdev_bindings_i_qinq', table_name='netdev_bindings')
op.drop_index('netdev_bindings_i_aeid', table_name='netdev_bindings')
op.drop_column('netdev_bindings', 'svlanid')
op.drop_column('netdev_bindings', 'cvlanid')
op.drop_column('netdev_bindings', 'aeid')
# ### end Alembic commands ###
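For completeness, one way to apply this revision programmatically through Alembic's Python API (sketch; assumes a standard alembic.ini in the working directory, and the NetProfile project may drive its migrations differently):
from alembic import command
from alembic.config import Config
command.upgrade(Config("alembic.ini"), "f2d2359b923a")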
|
|
5c269bfeb517b70cfcb8fd730bf3eb983a5515dc
|
markov_batch_learn.py
|
markov_batch_learn.py
|
from __future__ import unicode_literals
import argparse
import os
from cobe.brain import Brain
if __name__ == "__main__":
bots = ["ames", "bojii", "diderobot", "ekimbot", "harbot", "hubbot", "nopebot", "memebot",
"pyheufybot", "re_heufybot", "heufybot", "pymoronbot", "moronbot", "robobo", "safebot", "unsafebot"]
parser = argparse.ArgumentParser(description="A script to quickly teach a new markov brain from a folder of text files.")
parser.add_argument("-f", "--folder", help="The folder to read through.", type=str)
parser.add_argument("-b", "--brainfile", help="The filename to use for the brain.", type=str)
options = parser.parse_args()
brain = Brain(options.brainfile)
brain.start_batch_learning()
for filename in os.listdir(options.folder):
print os.path.join(options.folder, filename)
with open(os.path.join(options.folder, filename)) as current_file:
lines = current_file.readlines()
for line in lines:
templine = line.decode("utf-8")
if templine[templine.find("]")+1:].lstrip().startswith("<"):
newline = templine[templine.find("]")+1:].lstrip()
if newline[newline.find("<"):newline.find(">")+1].lower() not in bots:
if newline.find(">") != -1:
brain.learn(newline[newline.find(">")+1:])
brain.stop_batch_learning()
|
Create a quick script to train a COBE brain from a folder of formatted IRC logs
|
[Core] Create a quick script to train a COBE brain from a folder of formatted IRC logs
|
Python
|
mit
|
HubbeKing/Hubbot_Twisted
|
[Core] Create a quick script to train a COBE brain from a folder of formatted IRC logs
|
from __future__ import unicode_literals
import argparse
import os
from cobe.brain import Brain
if __name__ == "__main__":
bots = ["ames", "bojii", "diderobot", "ekimbot", "harbot", "hubbot", "nopebot", "memebot",
"pyheufybot", "re_heufybot", "heufybot", "pymoronbot", "moronbot", "robobo", "safebot", "unsafebot"]
parser = argparse.ArgumentParser(description="A script to quickly teach a new markov brain from a folder of text files.")
parser.add_argument("-f", "--folder", help="The folder to read through.", type=str)
parser.add_argument("-b", "--brainfile", help="The filename to use for the brain.", type=str)
options = parser.parse_args()
brain = Brain(options.brainfile)
brain.start_batch_learning()
for filename in os.listdir(options.folder):
print os.path.join(options.folder, filename)
with open(os.path.join(options.folder, filename)) as current_file:
lines = current_file.readlines()
for line in lines:
templine = line.decode("utf-8")
if templine[templine.find("]")+1:].lstrip().startswith("<"):
newline = templine[templine.find("]")+1:].lstrip()
if newline[newline.find("<"):newline.find(">")+1].lower() not in bots:
if newline.find(">") != -1:
brain.learn(newline[newline.find(">")+1:])
brain.stop_batch_learning()
|
<commit_before><commit_msg>[Core] Create a quick script to train a COBE brain from a folder of formatted IRC logs<commit_after>
|
from __future__ import unicode_literals
import argparse
import os
from cobe.brain import Brain
if __name__ == "__main__":
bots = ["ames", "bojii", "diderobot", "ekimbot", "harbot", "hubbot", "nopebot", "memebot",
"pyheufybot", "re_heufybot", "heufybot", "pymoronbot", "moronbot", "robobo", "safebot", "unsafebot"]
parser = argparse.ArgumentParser(description="A script to quickly teach a new markov brain from a folder of text files.")
parser.add_argument("-f", "--folder", help="The folder to read through.", type=str)
parser.add_argument("-b", "--brainfile", help="The filename to use for the brain.", type=str)
options = parser.parse_args()
brain = Brain(options.brainfile)
brain.start_batch_learning()
for filename in os.listdir(options.folder):
print os.path.join(options.folder, filename)
with open(os.path.join(options.folder, filename)) as current_file:
lines = current_file.readlines()
for line in lines:
templine = line.decode("utf-8")
if templine[templine.find("]")+1:].lstrip().startswith("<"):
newline = templine[templine.find("]")+1:].lstrip()
if newline[newline.find("<"):newline.find(">")+1].lower() not in bots:
if newline.find(">") != -1:
brain.learn(newline[newline.find(">")+1:])
brain.stop_batch_learning()
|
[Core] Create a quick script to train a COBE brain from a folder of formatted IRC logsfrom __future__ import unicode_literals
import argparse
import os
from cobe.brain import Brain
if __name__ == "__main__":
bots = ["ames", "bojii", "diderobot", "ekimbot", "harbot", "hubbot", "nopebot", "memebot",
"pyheufybot", "re_heufybot", "heufybot", "pymoronbot", "moronbot", "robobo", "safebot", "unsafebot"]
parser = argparse.ArgumentParser(description="A script to quickly teach a new markov brain from a folder of text files.")
parser.add_argument("-f", "--folder", help="The folder to read through.", type=str)
parser.add_argument("-b", "--brainfile", help="The filename to use for the brain.", type=str)
options = parser.parse_args()
brain = Brain(options.brainfile)
brain.start_batch_learning()
for filename in os.listdir(options.folder):
print os.path.join(options.folder, filename)
with open(os.path.join(options.folder, filename)) as current_file:
lines = current_file.readlines()
for line in lines:
templine = line.decode("utf-8")
if templine[templine.find("]")+1:].lstrip().startswith("<"):
newline = templine[templine.find("]")+1:].lstrip()
if newline[newline.find("<"):newline.find(">")+1].lower() not in bots:
if newline.find(">") != -1:
brain.learn(newline[newline.find(">")+1:])
brain.stop_batch_learning()
|
<commit_before><commit_msg>[Core] Create a quick script to train a COBE brain from a folder of formatted IRC logs<commit_after>from __future__ import unicode_literals
import argparse
import os
from cobe.brain import Brain
if __name__ == "__main__":
bots = ["ames", "bojii", "diderobot", "ekimbot", "harbot", "hubbot", "nopebot", "memebot",
"pyheufybot", "re_heufybot", "heufybot", "pymoronbot", "moronbot", "robobo", "safebot", "unsafebot"]
parser = argparse.ArgumentParser(description="A script to quickly teach a new markov brain from a folder of text files.")
parser.add_argument("-f", "--folder", help="The folder to read through.", type=str)
parser.add_argument("-b", "--brainfile", help="The filename to use for the brain.", type=str)
options = parser.parse_args()
brain = Brain(options.brainfile)
brain.start_batch_learning()
for filename in os.listdir(options.folder):
print os.path.join(options.folder, filename)
with open(os.path.join(options.folder, filename)) as current_file:
lines = current_file.readlines()
for line in lines:
templine = line.decode("utf-8")
if templine[templine.find("]")+1:].lstrip().startswith("<"):
newline = templine[templine.find("]")+1:].lstrip()
if newline[newline.find("<"):newline.find(">")+1].lower() not in bots:
if newline.find(">") != -1:
brain.learn(newline[newline.find(">")+1:])
brain.stop_batch_learning()
|
|
ddc3e45f5f84e5574090ee79875039e401864a49
|
IPython/core/tests/test_extension.py
|
IPython/core/tests/test_extension.py
|
import os.path
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
ext1_content = """
def load_ipython_extension(ip):
print("Running ext1 load")
def unload_ipython_extension(ip):
print("Running ext1 unload")
"""
ext2_content = """
def load_ipython_extension(ip):
print("Running ext2 load")
"""
def test_extension_loading():
em = get_ipython().extension_manager
with TemporaryDirectory() as td:
ext1 = os.path.join(td, 'ext1.py')
with open(ext1, 'w') as f:
f.write(ext1_content)
ext2 = os.path.join(td, 'ext2.py')
with open(ext2, 'w') as f:
f.write(ext2_content)
with prepended_to_syspath(td):
assert 'ext1' not in em.loaded
assert 'ext2' not in em.loaded
# Load extension
with tt.AssertPrints("Running ext1 load"):
assert em.load_extension('ext1') is None
assert 'ext1' in em.loaded
# Should refuse to load it again
with tt.AssertNotPrints("Running ext1 load"):
assert em.load_extension('ext1') == 'already loaded'
# Reload
with tt.AssertPrints("Running ext1 unload"):
with tt.AssertPrints("Running ext1 load", suppress=False):
em.reload_extension('ext1')
# Unload
with tt.AssertPrints("Running ext1 unload"):
assert em.unload_extension('ext1') is None
# Can't unload again
with tt.AssertNotPrints("Running ext1 unload"):
assert em.unload_extension('ext1') == 'not loaded'
assert em.unload_extension('ext2') == 'not loaded'
# Load extension 2
with tt.AssertPrints("Running ext2 load"):
assert em.load_extension('ext2') is None
# Can't unload this
assert em.unload_extension('ext2') == 'no unload function'
# But can reload it
with tt.AssertPrints("Running ext2 load"):
em.reload_extension('ext2')
|
Add test for extension loading and unloading
|
Add test for extension loading and unloading
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add test for extension loading and unloading
|
import os.path
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
ext1_content = """
def load_ipython_extension(ip):
print("Running ext1 load")
def unload_ipython_extension(ip):
print("Running ext1 unload")
"""
ext2_content = """
def load_ipython_extension(ip):
print("Running ext2 load")
"""
def test_extension_loading():
em = get_ipython().extension_manager
with TemporaryDirectory() as td:
ext1 = os.path.join(td, 'ext1.py')
with open(ext1, 'w') as f:
f.write(ext1_content)
ext2 = os.path.join(td, 'ext2.py')
with open(ext2, 'w') as f:
f.write(ext2_content)
with prepended_to_syspath(td):
assert 'ext1' not in em.loaded
assert 'ext2' not in em.loaded
# Load extension
with tt.AssertPrints("Running ext1 load"):
assert em.load_extension('ext1') is None
assert 'ext1' in em.loaded
# Should refuse to load it again
with tt.AssertNotPrints("Running ext1 load"):
assert em.load_extension('ext1') == 'already loaded'
# Reload
with tt.AssertPrints("Running ext1 unload"):
with tt.AssertPrints("Running ext1 load", suppress=False):
em.reload_extension('ext1')
# Unload
with tt.AssertPrints("Running ext1 unload"):
assert em.unload_extension('ext1') is None
# Can't unload again
with tt.AssertNotPrints("Running ext1 unload"):
assert em.unload_extension('ext1') == 'not loaded'
assert em.unload_extension('ext2') == 'not loaded'
# Load extension 2
with tt.AssertPrints("Running ext2 load"):
assert em.load_extension('ext2') is None
# Can't unload this
assert em.unload_extension('ext2') == 'no unload function'
# But can reload it
with tt.AssertPrints("Running ext2 load"):
em.reload_extension('ext2')
|
<commit_before><commit_msg>Add test for extension loading and unloading<commit_after>
|
import os.path
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
ext1_content = """
def load_ipython_extension(ip):
print("Running ext1 load")
def unload_ipython_extension(ip):
print("Running ext1 unload")
"""
ext2_content = """
def load_ipython_extension(ip):
print("Running ext2 load")
"""
def test_extension_loading():
em = get_ipython().extension_manager
with TemporaryDirectory() as td:
ext1 = os.path.join(td, 'ext1.py')
with open(ext1, 'w') as f:
f.write(ext1_content)
ext2 = os.path.join(td, 'ext2.py')
with open(ext2, 'w') as f:
f.write(ext2_content)
with prepended_to_syspath(td):
assert 'ext1' not in em.loaded
assert 'ext2' not in em.loaded
# Load extension
with tt.AssertPrints("Running ext1 load"):
assert em.load_extension('ext1') is None
assert 'ext1' in em.loaded
# Should refuse to load it again
with tt.AssertNotPrints("Running ext1 load"):
assert em.load_extension('ext1') == 'already loaded'
# Reload
with tt.AssertPrints("Running ext1 unload"):
with tt.AssertPrints("Running ext1 load", suppress=False):
em.reload_extension('ext1')
# Unload
with tt.AssertPrints("Running ext1 unload"):
assert em.unload_extension('ext1') is None
# Can't unload again
with tt.AssertNotPrints("Running ext1 unload"):
assert em.unload_extension('ext1') == 'not loaded'
assert em.unload_extension('ext2') == 'not loaded'
# Load extension 2
with tt.AssertPrints("Running ext2 load"):
assert em.load_extension('ext2') is None
# Can't unload this
assert em.unload_extension('ext2') == 'no unload function'
# But can reload it
with tt.AssertPrints("Running ext2 load"):
em.reload_extension('ext2')
|
Add test for extension loading and unloadingimport os.path
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
ext1_content = """
def load_ipython_extension(ip):
print("Running ext1 load")
def unload_ipython_extension(ip):
print("Running ext1 unload")
"""
ext2_content = """
def load_ipython_extension(ip):
print("Running ext2 load")
"""
def test_extension_loading():
em = get_ipython().extension_manager
with TemporaryDirectory() as td:
ext1 = os.path.join(td, 'ext1.py')
with open(ext1, 'w') as f:
f.write(ext1_content)
ext2 = os.path.join(td, 'ext2.py')
with open(ext2, 'w') as f:
f.write(ext2_content)
with prepended_to_syspath(td):
assert 'ext1' not in em.loaded
assert 'ext2' not in em.loaded
# Load extension
with tt.AssertPrints("Running ext1 load"):
assert em.load_extension('ext1') is None
assert 'ext1' in em.loaded
# Should refuse to load it again
with tt.AssertNotPrints("Running ext1 load"):
assert em.load_extension('ext1') == 'already loaded'
# Reload
with tt.AssertPrints("Running ext1 unload"):
with tt.AssertPrints("Running ext1 load", suppress=False):
em.reload_extension('ext1')
# Unload
with tt.AssertPrints("Running ext1 unload"):
assert em.unload_extension('ext1') is None
# Can't unload again
with tt.AssertNotPrints("Running ext1 unload"):
assert em.unload_extension('ext1') == 'not loaded'
assert em.unload_extension('ext2') == 'not loaded'
# Load extension 2
with tt.AssertPrints("Running ext2 load"):
assert em.load_extension('ext2') is None
# Can't unload this
assert em.unload_extension('ext2') == 'no unload function'
# But can reload it
with tt.AssertPrints("Running ext2 load"):
em.reload_extension('ext2')
|
<commit_before><commit_msg>Add test for extension loading and unloading<commit_after>import os.path
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
ext1_content = """
def load_ipython_extension(ip):
print("Running ext1 load")
def unload_ipython_extension(ip):
print("Running ext1 unload")
"""
ext2_content = """
def load_ipython_extension(ip):
print("Running ext2 load")
"""
def test_extension_loading():
em = get_ipython().extension_manager
with TemporaryDirectory() as td:
ext1 = os.path.join(td, 'ext1.py')
with open(ext1, 'w') as f:
f.write(ext1_content)
ext2 = os.path.join(td, 'ext2.py')
with open(ext2, 'w') as f:
f.write(ext2_content)
with prepended_to_syspath(td):
assert 'ext1' not in em.loaded
assert 'ext2' not in em.loaded
# Load extension
with tt.AssertPrints("Running ext1 load"):
assert em.load_extension('ext1') is None
assert 'ext1' in em.loaded
# Should refuse to load it again
with tt.AssertNotPrints("Running ext1 load"):
assert em.load_extension('ext1') == 'already loaded'
# Reload
with tt.AssertPrints("Running ext1 unload"):
with tt.AssertPrints("Running ext1 load", suppress=False):
em.reload_extension('ext1')
# Unload
with tt.AssertPrints("Running ext1 unload"):
assert em.unload_extension('ext1') is None
# Can't unload again
with tt.AssertNotPrints("Running ext1 unload"):
assert em.unload_extension('ext1') == 'not loaded'
assert em.unload_extension('ext2') == 'not loaded'
# Load extension 2
with tt.AssertPrints("Running ext2 load"):
assert em.load_extension('ext2') is None
# Can't unload this
assert em.unload_extension('ext2') == 'no unload function'
# But can reload it
with tt.AssertPrints("Running ext2 load"):
em.reload_extension('ext2')
|
|
fc97a838d54417cb063a7757040ff279f298d0bb
|
cookie_skel.py
|
cookie_skel.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 20:49:34 2016
@author: troon
"""
import BaseHTTPServer, SimpleHTTPServer
from http.cookies import SimpleCookie as cookie
class ApplicationRequestHandler(SimpleHTTPServer.BaseHTTPRequestHandler):
sessioncookies = {}
def __init__(self,*args,**kwargs):
self.sessionidmorsel = None
super().__init__(*args,**kwargs)
def _session_cookie(self,forcenew=False):
cookiestring = "\n".join(self.headers.get_all('Cookie',failobj=[]))
c = cookie()
c.load(cookiestring)
try:
if forcenew or self.sessioncookies[c['session_id'].value]-time() > 3600:
raise ValueError('new cookie needed')
except:
c['session_id']=uuid().hex
for m in c:
if m=='session_id':
self.sessioncookies[c[m].value] = time()
c[m]["httponly"] = True
c[m]["max-age"] = 3600
c[m]["expires"] = self.date_time_string(time()+3600)
self.sessionidmorsel = c[m]
break
|
Add snip code for http.cookies
|
Add snip code for http.cookies
|
Python
|
cc0-1.0
|
JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology
|
Add snip code for http.cookies
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 20:49:34 2016
@author: troon
"""
import BaseHTTPServer, SimpleHTTPServer
from http.cookies import SimpleCookie as cookie
class ApplicationRequestHandler(SimpleHTTPServer.BaseHTTPRequestHandler):
sessioncookies = {}
def __init__(self,*args,**kwargs):
self.sessionidmorsel = None
super().__init__(*args,**kwargs)
def _session_cookie(self,forcenew=False):
cookiestring = "\n".join(self.headers.get_all('Cookie',failobj=[]))
c = cookie()
c.load(cookiestring)
try:
if forcenew or self.sessioncookies[c['session_id'].value]-time() > 3600:
raise ValueError('new cookie needed')
except:
c['session_id']=uuid().hex
for m in c:
if m=='session_id':
self.sessioncookies[c[m].value] = time()
c[m]["httponly"] = True
c[m]["max-age"] = 3600
c[m]["expires"] = self.date_time_string(time()+3600)
self.sessionidmorsel = c[m]
break
|
<commit_before><commit_msg>Add snip code for http.cookies<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 20:49:34 2016
@author: troon
"""
import BaseHTTPServer, SimpleHTTPServer
from http.cookies import SimpleCookie as cookie
class ApplicationRequestHandler(SimpleHTTPServer.BaseHTTPRequestHandler):
sessioncookies = {}
def __init__(self,*args,**kwargs):
self.sessionidmorsel = None
super().__init__(*args,**kwargs)
def _session_cookie(self,forcenew=False):
cookiestring = "\n".join(self.headers.get_all('Cookie',failobj=[]))
c = cookie()
c.load(cookiestring)
try:
if forcenew or self.sessioncookies[c['session_id'].value]-time() > 3600:
raise ValueError('new cookie needed')
except:
c['session_id']=uuid().hex
for m in c:
if m=='session_id':
self.sessioncookies[c[m].value] = time()
c[m]["httponly"] = True
c[m]["max-age"] = 3600
c[m]["expires"] = self.date_time_string(time()+3600)
self.sessionidmorsel = c[m]
break
|
Add snip code for http.cookies# -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 20:49:34 2016
@author: troon
"""
import BaseHTTPServer, SimpleHTTPServer
from http.cookies import SimpleCookie as cookie
class ApplicationRequestHandler(SimpleHTTPServer.BaseHTTPRequestHandler):
sessioncookies = {}
def __init__(self,*args,**kwargs):
self.sessionidmorsel = None
super().__init__(*args,**kwargs)
def _session_cookie(self,forcenew=False):
cookiestring = "\n".join(self.headers.get_all('Cookie',failobj=[]))
c = cookie()
c.load(cookiestring)
try:
if forcenew or self.sessioncookies[c['session_id'].value]-time() > 3600:
raise ValueError('new cookie needed')
except:
c['session_id']=uuid().hex
for m in c:
if m=='session_id':
self.sessioncookies[c[m].value] = time()
c[m]["httponly"] = True
c[m]["max-age"] = 3600
c[m]["expires"] = self.date_time_string(time()+3600)
self.sessionidmorsel = c[m]
break
|
<commit_before><commit_msg>Add snip code for http.cookies<commit_after># -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 20:49:34 2016
@author: troon
"""
import BaseHTTPServer, SimpleHTTPServer
from http.cookies import SimpleCookie as cookie
class ApplicationRequestHandler(SimpleHTTPServer.BaseHTTPRequestHandler):
sessioncookies = {}
def __init__(self,*args,**kwargs):
self.sessionidmorsel = None
super().__init__(*args,**kwargs)
def _session_cookie(self,forcenew=False):
cookiestring = "\n".join(self.headers.get_all('Cookie',failobj=[]))
c = cookie()
c.load(cookiestring)
try:
if forcenew or self.sessioncookies[c['session_id'].value]-time() > 3600:
raise ValueError('new cookie needed')
except:
c['session_id']=uuid().hex
for m in c:
if m=='session_id':
self.sessioncookies[c[m].value] = time()
c[m]["httponly"] = True
c[m]["max-age"] = 3600
c[m]["expires"] = self.date_time_string(time()+3600)
self.sessionidmorsel = c[m]
break
|
|
08636c9740b3103fd05c81791f43faeb29920305
|
test/test_utils.py
|
test/test_utils.py
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import warnings
import os.path
import libcloud.utils
WARNINGS_BUFFER = []
def show_warning(msg, cat, fname, lno):
WARNINGS_BUFFER.append((msg, cat, fname, lno))
original_func = warnings.showwarning
class TestUtils(unittest.TestCase):
def setUp(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
def tearDown(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
warnings.showwarning = original_func
def test_guess_file_mime_type(self):
file_path = os.path.abspath(__file__)
mimetype, encoding = libcloud.utils.guess_file_mime_type(file_path=file_path)
self.assertEqual(mimetype, 'text/x-python')
def test_deprecated_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_DEPRECATION_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_DEPRECATION_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_in_development_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
if __name__ == '__main__':
sys.exit(unittest.main())
|
Add tests for some util functions.
|
Add tests for some util functions.
git-svn-id: 353d90d4d8d13dcb4e0402680a9155a727f61a5a@1082063 13f79535-47bb-0310-9956-ffa450edef68
|
Python
|
apache-2.0
|
aleGpereira/libcloud,sahildua2305/libcloud,pquentin/libcloud,Verizon/libcloud,wuyuewen/libcloud,aleGpereira/libcloud,iPlantCollaborativeOpenSource/libcloud,Verizon/libcloud,Jc2k/libcloud,Cloud-Elasticity-Services/as-libcloud,aviweit/libcloud,vongazman/libcloud,techhat/libcloud,ZuluPro/libcloud,pquentin/libcloud,illfelder/libcloud,lochiiconnectivity/libcloud,thesquelched/libcloud,mathspace/libcloud,mgogoulos/libcloud,t-tran/libcloud,mistio/libcloud,niteoweb/libcloud,erjohnso/libcloud,schaubl/libcloud,thesquelched/libcloud,Kami/libcloud,wrigri/libcloud,pantheon-systems/libcloud,curoverse/libcloud,cryptickp/libcloud,cloudControl/libcloud,carletes/libcloud,Cloud-Elasticity-Services/as-libcloud,wuyuewen/libcloud,DimensionDataCBUSydney/libcloud,StackPointCloud/libcloud,munkiat/libcloud,SecurityCompass/libcloud,ZuluPro/libcloud,wido/libcloud,marcinzaremba/libcloud,sgammon/libcloud,munkiat/libcloud,mathspace/libcloud,mbrukman/libcloud,wrigri/libcloud,watermelo/libcloud,pantheon-systems/libcloud,wido/libcloud,kater169/libcloud,SecurityCompass/libcloud,apache/libcloud,jimbobhickville/libcloud,JamesGuthrie/libcloud,mistio/libcloud,curoverse/libcloud,samuelchong/libcloud,NexusIS/libcloud,MrBasset/libcloud,illfelder/libcloud,t-tran/libcloud,mgogoulos/libcloud,Keisuke69/libcloud,watermelo/libcloud,pantheon-systems/libcloud,Jc2k/libcloud,techhat/libcloud,aleGpereira/libcloud,schaubl/libcloud,jimbobhickville/libcloud,SecurityCompass/libcloud,kater169/libcloud,ZuluPro/libcloud,ninefold/libcloud,lochiiconnectivity/libcloud,mtekel/libcloud,vongazman/libcloud,MrBasset/libcloud,jerryblakley/libcloud,MrBasset/libcloud,ByteInternet/libcloud,ClusterHQ/libcloud,cloudControl/libcloud,Kami/libcloud,Cloud-Elasticity-Services/as-libcloud,samuelchong/libcloud,supertom/libcloud,watermelo/libcloud,Itxaka/libcloud,Kami/libcloud,marcinzaremba/libcloud,Itxaka/libcloud,NexusIS/libcloud,dcorbacho/libcloud,sahildua2305/libcloud,cryptickp/libcloud,atsaki/libcloud,briancurtin/libcloud,niteoweb/libcloud,supertom/libcloud,marcinzaremba/libcloud,mtekel/libcloud,andrewsomething/libcloud,sfriesel/libcloud,cloudControl/libcloud,jimbobhickville/libcloud,wrigri/libcloud,sfriesel/libcloud,ByteInternet/libcloud,apache/libcloud,supertom/libcloud,wido/libcloud,briancurtin/libcloud,dcorbacho/libcloud,aviweit/libcloud,sergiorua/libcloud,ByteInternet/libcloud,ninefold/libcloud,samuelchong/libcloud,Scalr/libcloud,apache/libcloud,JamesGuthrie/libcloud,Scalr/libcloud,iPlantCollaborativeOpenSource/libcloud,t-tran/libcloud,smaffulli/libcloud,techhat/libcloud,ClusterHQ/libcloud,schaubl/libcloud,andrewsomething/libcloud,mtekel/libcloud,pquentin/libcloud,Keisuke69/libcloud,DimensionDataCBUSydney/libcloud,niteoweb/libcloud,smaffulli/libcloud,cryptickp/libcloud,vongazman/libcloud,mbrukman/libcloud,NexusIS/libcloud,Verizon/libcloud,kater169/libcloud,aviweit/libcloud,carletes/libcloud,iPlantCollaborativeOpenSource/libcloud,jerryblakley/libcloud,erjohnso/libcloud,erjohnso/libcloud,jerryblakley/libcloud,mathspace/libcloud,dcorbacho/libcloud,briancurtin/libcloud,lochiiconnectivity/libcloud,sergiorua/libcloud,sfriesel/libcloud,StackPointCloud/libcloud,StackPointCloud/libcloud,DimensionDataCBUSydney/libcloud,curoverse/libcloud,thesquelched/libcloud,Itxaka/libcloud,mistio/libcloud,carletes/libcloud,sergiorua/libcloud,smaffulli/libcloud,wuyuewen/libcloud,illfelder/libcloud,JamesGuthrie/libcloud,sahildua2305/libcloud,sgammon/libcloud,mgogoulos/libcloud,atsaki/libcloud,mbrukman/libcloud,atsaki/libcloud,Scalr/libcloud,andrewsomething/libcloud,munkiat/libcloud
|
Add tests for some util functions.
git-svn-id: 353d90d4d8d13dcb4e0402680a9155a727f61a5a@1082063 13f79535-47bb-0310-9956-ffa450edef68
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import warnings
import os.path
import libcloud.utils
WARNINGS_BUFFER = []
def show_warning(msg, cat, fname, lno):
WARNINGS_BUFFER.append((msg, cat, fname, lno))
original_func = warnings.showwarning
class TestUtils(unittest.TestCase):
def setUp(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
def tearDown(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
warnings.showwarning = original_func
def test_guess_file_mime_type(self):
file_path = os.path.abspath(__file__)
mimetype, encoding = libcloud.utils.guess_file_mime_type(file_path=file_path)
self.assertEqual(mimetype, 'text/x-python')
def test_deprecated_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_DEPRECATION_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_DEPRECATION_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_in_development_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
if __name__ == '__main__':
sys.exit(unittest.main())
|
<commit_before><commit_msg>Add tests for some util functions.
git-svn-id: 353d90d4d8d13dcb4e0402680a9155a727f61a5a@1082063 13f79535-47bb-0310-9956-ffa450edef68<commit_after>
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import warnings
import os.path
import libcloud.utils
WARNINGS_BUFFER = []
def show_warning(msg, cat, fname, lno):
WARNINGS_BUFFER.append((msg, cat, fname, lno))
original_func = warnings.showwarning
class TestUtils(unittest.TestCase):
def setUp(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
def tearDown(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
warnings.showwarning = original_func
def test_guess_file_mime_type(self):
file_path = os.path.abspath(__file__)
mimetype, encoding = libcloud.utils.guess_file_mime_type(file_path=file_path)
self.assertEqual(mimetype, 'text/x-python')
def test_deprecated_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_DEPRECATION_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_DEPRECATION_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_in_development_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
if __name__ == '__main__':
sys.exit(unittest.main())
|
Add tests for some util functions.
git-svn-id: 353d90d4d8d13dcb4e0402680a9155a727f61a5a@1082063 13f79535-47bb-0310-9956-ffa450edef68# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import warnings
import os.path
import libcloud.utils
WARNINGS_BUFFER = []
def show_warning(msg, cat, fname, lno):
WARNINGS_BUFFER.append((msg, cat, fname, lno))
original_func = warnings.showwarning
class TestUtils(unittest.TestCase):
def setUp(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
def tearDown(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
warnings.showwarning = original_func
def test_guess_file_mime_type(self):
file_path = os.path.abspath(__file__)
mimetype, encoding = libcloud.utils.guess_file_mime_type(file_path=file_path)
self.assertEqual(mimetype, 'text/x-python')
def test_deprecated_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_DEPRECATION_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_DEPRECATION_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_in_development_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
if __name__ == '__main__':
sys.exit(unittest.main())
|
<commit_before><commit_msg>Add tests for some util functions.
git-svn-id: 353d90d4d8d13dcb4e0402680a9155a727f61a5a@1082063 13f79535-47bb-0310-9956-ffa450edef68<commit_after># -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import warnings
import os.path
import libcloud.utils
WARNINGS_BUFFER = []
def show_warning(msg, cat, fname, lno):
WARNINGS_BUFFER.append((msg, cat, fname, lno))
original_func = warnings.showwarning
class TestUtils(unittest.TestCase):
def setUp(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
def tearDown(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
warnings.showwarning = original_func
def test_guess_file_mime_type(self):
file_path = os.path.abspath(__file__)
mimetype, encoding = libcloud.utils.guess_file_mime_type(file_path=file_path)
self.assertEqual(mimetype, 'text/x-python')
def test_deprecated_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_DEPRECATION_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_DEPRECATION_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_in_development_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
04740a33ab8b4d43cda71668ff7027ac7e5982d5
|
tests/test_cdav.py
|
tests/test_cdav.py
|
import datetime
import pytz
import tzlocal
from caldav.elements.cdav import _to_utc_date_string
SOMEWHERE_REMOTE = pytz.timezone('Brazil/DeNoronha') # UTC-2 and no DST
def test_to_utc_date_string_date():
input = datetime.date(2019, 5, 14)
res = _to_utc_date_string(input)
assert res == '20190514T000000Z'
def test_to_utc_date_string_utc():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23, tzinfo=datetime.timezone.utc)
res = _to_utc_date_string(input.astimezone())
assert res == '20190514T211023Z'
def test_to_utc_date_string_dt_with_pytz_tzinfo():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(SOMEWHERE_REMOTE.localize(input))
assert res == '20190514T231023Z'
def test_to_utc_date_string_dt_with_local_tz():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input.astimezone())
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
def test_to_utc_date_string_naive_dt():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input)
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
|
Add test. This continues to need pytz and tzlocal.
|
Add test. This continues to need pytz and tzlocal.
|
Python
|
apache-2.0
|
python-caldav/caldav
|
Add test. This continues to need pytz and tzlocal.
|
import datetime
import pytz
import tzlocal
from caldav.elements.cdav import _to_utc_date_string
SOMEWHERE_REMOTE = pytz.timezone('Brazil/DeNoronha') # UTC-2 and no DST
def test_to_utc_date_string_date():
input = datetime.date(2019, 5, 14)
res = _to_utc_date_string(input)
assert res == '20190514T000000Z'
def test_to_utc_date_string_utc():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23, tzinfo=datetime.timezone.utc)
res = _to_utc_date_string(input.astimezone())
assert res == '20190514T211023Z'
def test_to_utc_date_string_dt_with_pytz_tzinfo():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(SOMEWHERE_REMOTE.localize(input))
assert res == '20190514T231023Z'
def test_to_utc_date_string_dt_with_local_tz():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input.astimezone())
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
def test_to_utc_date_string_naive_dt():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input)
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
|
<commit_before><commit_msg>Add test. This continues to need pytz and tzlocal.<commit_after>
|
import datetime
import pytz
import tzlocal
from caldav.elements.cdav import _to_utc_date_string
SOMEWHERE_REMOTE = pytz.timezone('Brazil/DeNoronha') # UTC-2 and no DST
def test_to_utc_date_string_date():
input = datetime.date(2019, 5, 14)
res = _to_utc_date_string(input)
assert res == '20190514T000000Z'
def test_to_utc_date_string_utc():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23, tzinfo=datetime.timezone.utc)
res = _to_utc_date_string(input.astimezone())
assert res == '20190514T211023Z'
def test_to_utc_date_string_dt_with_pytz_tzinfo():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(SOMEWHERE_REMOTE.localize(input))
assert res == '20190514T231023Z'
def test_to_utc_date_string_dt_with_local_tz():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input.astimezone())
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
def test_to_utc_date_string_naive_dt():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input)
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
|
Add test. This continues to need pytz and tzlocal.import datetime
import pytz
import tzlocal
from caldav.elements.cdav import _to_utc_date_string
SOMEWHERE_REMOTE = pytz.timezone('Brazil/DeNoronha') # UTC-2 and no DST
def test_to_utc_date_string_date():
input = datetime.date(2019, 5, 14)
res = _to_utc_date_string(input)
assert res == '20190514T000000Z'
def test_to_utc_date_string_utc():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23, tzinfo=datetime.timezone.utc)
res = _to_utc_date_string(input.astimezone())
assert res == '20190514T211023Z'
def test_to_utc_date_string_dt_with_pytz_tzinfo():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(SOMEWHERE_REMOTE.localize(input))
assert res == '20190514T231023Z'
def test_to_utc_date_string_dt_with_local_tz():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input.astimezone())
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
def test_to_utc_date_string_naive_dt():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input)
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
|
<commit_before><commit_msg>Add test. This continues to need pytz and tzlocal.<commit_after>import datetime
import pytz
import tzlocal
from caldav.elements.cdav import _to_utc_date_string
SOMEWHERE_REMOTE = pytz.timezone('Brazil/DeNoronha') # UTC-2 and no DST
def test_to_utc_date_string_date():
input = datetime.date(2019, 5, 14)
res = _to_utc_date_string(input)
assert res == '20190514T000000Z'
def test_to_utc_date_string_utc():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23, tzinfo=datetime.timezone.utc)
res = _to_utc_date_string(input.astimezone())
assert res == '20190514T211023Z'
def test_to_utc_date_string_dt_with_pytz_tzinfo():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(SOMEWHERE_REMOTE.localize(input))
assert res == '20190514T231023Z'
def test_to_utc_date_string_dt_with_local_tz():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input.astimezone())
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
def test_to_utc_date_string_naive_dt():
input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
res = _to_utc_date_string(input)
exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
assert res == exp
|
|
7a70d230d3ceb3c37d718f138e80b132b9a05fae
|
edwin/teams/migrations/0005_auto_20150811_2236.py
|
edwin/teams/migrations/0005_auto_20150811_2236.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('teams', '0004_auto_20150516_0009'),
]
operations = [
migrations.AlterField(
model_name='team',
name='github_repo',
field=models.CharField(blank=True, help_text='Comma-separated list of repos, like "mozilla/edwin,mozilla/edwin2"', max_length=1024),
),
]
|
Add migration for multiple repos per team.
|
Add migration for multiple repos per team.
|
Python
|
mpl-2.0
|
mythmon/edwin,mythmon/edwin,mythmon/edwin,mythmon/edwin
|
Add migration for multiple repos per team.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('teams', '0004_auto_20150516_0009'),
]
operations = [
migrations.AlterField(
model_name='team',
name='github_repo',
field=models.CharField(blank=True, help_text='Comma-separated list of repos, like "mozilla/edwin,mozilla/edwin2"', max_length=1024),
),
]
|
<commit_before><commit_msg>Add migration for multiple repos per team.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('teams', '0004_auto_20150516_0009'),
]
operations = [
migrations.AlterField(
model_name='team',
name='github_repo',
field=models.CharField(blank=True, help_text='Comma-separated list of repos, like "mozilla/edwin,mozilla/edwin2"', max_length=1024),
),
]
|
Add migration for multiple repos per team.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('teams', '0004_auto_20150516_0009'),
]
operations = [
migrations.AlterField(
model_name='team',
name='github_repo',
field=models.CharField(blank=True, help_text='Comma-separated list of repos, like "mozilla/edwin,mozilla/edwin2"', max_length=1024),
),
]
|
<commit_before><commit_msg>Add migration for multiple repos per team.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('teams', '0004_auto_20150516_0009'),
]
operations = [
migrations.AlterField(
model_name='team',
name='github_repo',
field=models.CharField(blank=True, help_text='Comma-separated list of repos, like "mozilla/edwin,mozilla/edwin2"', max_length=1024),
),
]
|
|
4c4891f24c0e5b093d3a9fcb0de86609b01a69c3
|
fellowms/migrations/0053_auto_20160804_1447.py
|
fellowms/migrations/0053_auto_20160804_1447.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-04 14:47
from __future__ import unicode_literals
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('fellowms', '0052_merge'),
]
operations = [
migrations.RenameField(
model_name='event',
old_name='location',
new_name='city',
),
migrations.RenameField(
model_name='fellow',
old_name='home_location',
new_name='home_city',
),
migrations.AddField(
model_name='event',
name='country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
migrations.AddField(
model_name='fellow',
name='home_country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
]
|
Add migration for replace location with country and city
|
Add migration for replace location with country and city
|
Python
|
bsd-3-clause
|
softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat
|
Add migration for replace location with country and city
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-04 14:47
from __future__ import unicode_literals
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('fellowms', '0052_merge'),
]
operations = [
migrations.RenameField(
model_name='event',
old_name='location',
new_name='city',
),
migrations.RenameField(
model_name='fellow',
old_name='home_location',
new_name='home_city',
),
migrations.AddField(
model_name='event',
name='country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
migrations.AddField(
model_name='fellow',
name='home_country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
]
|
<commit_before><commit_msg>Add migration for replace location with country and city<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-04 14:47
from __future__ import unicode_literals
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('fellowms', '0052_merge'),
]
operations = [
migrations.RenameField(
model_name='event',
old_name='location',
new_name='city',
),
migrations.RenameField(
model_name='fellow',
old_name='home_location',
new_name='home_city',
),
migrations.AddField(
model_name='event',
name='country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
migrations.AddField(
model_name='fellow',
name='home_country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
]
|
Add migration for replace location with country and city# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-04 14:47
from __future__ import unicode_literals
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('fellowms', '0052_merge'),
]
operations = [
migrations.RenameField(
model_name='event',
old_name='location',
new_name='city',
),
migrations.RenameField(
model_name='fellow',
old_name='home_location',
new_name='home_city',
),
migrations.AddField(
model_name='event',
name='country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
migrations.AddField(
model_name='fellow',
name='home_country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
]
|
<commit_before><commit_msg>Add migration for replace location with country and city<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-04 14:47
from __future__ import unicode_literals
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('fellowms', '0052_merge'),
]
operations = [
migrations.RenameField(
model_name='event',
old_name='location',
new_name='city',
),
migrations.RenameField(
model_name='fellow',
old_name='home_location',
new_name='home_city',
),
migrations.AddField(
model_name='event',
name='country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
migrations.AddField(
model_name='fellow',
name='home_country',
field=django_countries.fields.CountryField(default='UK', max_length=2),
),
]
|
|
d1b7ed5f705c8e0935778636ade00a7452e2ea7f
|
project/holviapp/management/commands/import_holvidata.py
|
project/holviapp/management/commands/import_holvidata.py
|
# -*- coding: utf-8 -*-
import datetime
import itertools
import dateutil.parser
from django.core.management.base import BaseCommand, CommandError
from holviapp.importer import HolviImporter
from holviapp.utils import list_invoices, list_orders
def yesterday_proxy():
now_yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
start_yesterday = datetime.datetime.combine(now_yesterday.date(), datetime.datetime.min.time())
return start_yesterday.isoformat()
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
parser.add_argument('--all', action='store_true', help='Import all Holvi transactions (WARNING: this may take forever)')
parser.add_argument('since', type=str, nargs='?', default=yesterday_proxy(), help='Import transactions updated since datetime, defaults to yesterday midnight')
def handle(self, *args, **options):
if (not options['since']
and not options['all']):
raise CommandError('Either since or all must be specified')
invoice_filters = {}
order_filters = {}
if not options.get('all', False):
since_parsed = dateutil.parser.parse(options['since'])
print("Importing since %s" % since_parsed.isoformat())
invoice_filters['update_time_from'] = since_parsed.isoformat()
order_filters['filter_paid_time_from'] = since_parsed.isoformat()
h = HolviImporter(itertools.chain(list_invoices(**invoice_filters), list_orders(**order_filters)))
transactions = h.import_transactions()
for t in transactions:
print("Imported transaction %s" % t)
|
Add management command for importing Holvi Invoices and Orders
|
Add management command for importing Holvi Invoices and Orders
Invoices may have multiple payments, abstracttransaction is generated
for each payment line.
Orders are paid in one go (they're from the webshop), one transaction
is generated from an Order.
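The mapping described above (several payments per invoice, exactly one payment per webshop order) can be pictured with a small, purely illustrative sketch; the dict keys and the flatten_to_transactions helper are invented for this example and are not the real Holvi API or the actual HolviImporter logic.
# Hypothetical illustration only -- field names and helper are assumptions,
# not the real Holvi API or HolviImporter internals.
def flatten_to_transactions(invoices, orders):
    transactions = []
    for invoice in invoices:
        # An invoice may carry several payments; emit one transaction per payment line.
        for payment in invoice.get("payments", []):
            transactions.append({"source": invoice["code"], "amount": payment["amount"]})
    for order in orders:
        # A webshop order is paid in one go; emit a single transaction for it.
        transactions.append({"source": order["code"], "amount": order["paid_amount"]})
    return transactions
if __name__ == "__main__":
    invoices = [{"code": "INV-1", "payments": [{"amount": "10.00"}, {"amount": "5.00"}]}]
    orders = [{"code": "ORD-1", "paid_amount": "20.00"}]
    print(flatten_to_transactions(invoices, orders))  # three transactions in total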
|
Python
|
mit
|
rambo/asylum,rambo/asylum,hacklab-fi/asylum,jautero/asylum,HelsinkiHacklab/asylum,jautero/asylum,rambo/asylum,rambo/asylum,jautero/asylum,HelsinkiHacklab/asylum,hacklab-fi/asylum,hacklab-fi/asylum,HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,jautero/asylum,hacklab-fi/asylum
|
Add management command for importing Holvi Invoices and Orders
Invoices may have multiple payments, abstracttransaction is generated
for each payment line.
Orders are paid in one go (they're from the webshop), one transaction
is generated from an Order.
|
# -*- coding: utf-8 -*-
import datetime
import itertools
import dateutil.parser
from django.core.management.base import BaseCommand, CommandError
from holviapp.importer import HolviImporter
from holviapp.utils import list_invoices, list_orders
def yesterday_proxy():
now_yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
start_yesterday = datetime.datetime.combine(now_yesterday.date(), datetime.datetime.min.time())
return start_yesterday.isoformat()
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
parser.add_argument('--all', action='store_true', help='Import all Holvi transactions (WARNING: this may take forever)')
parser.add_argument('since', type=str, nargs='?', default=yesterday_proxy(), help='Import transactions updated since datetime, defaults to yesterday midnight')
def handle(self, *args, **options):
if (not options['since']
and not options['all']):
raise CommandError('Either since or all must be specified')
invoice_filters = {}
order_filters = {}
if not options.get('all', False):
since_parsed = dateutil.parser.parse(options['since'])
print("Importing since %s" % since_parsed.isoformat())
invoice_filters['update_time_from'] = since_parsed.isoformat()
order_filters['filter_paid_time_from'] = since_parsed.isoformat()
h = HolviImporter(itertools.chain(list_invoices(**invoice_filters), list_orders(**order_filters)))
transactions = h.import_transactions()
for t in transactions:
print("Imported transaction %s" % t)
|
<commit_before><commit_msg>Add management command for importing Holvi Invoices and Orders
Invoices may have multiple payments, abstracttransaction is generated
for each payment line.
Orders are paid in one go (they're from the webshop), one transaction
is generated from an Order.<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
import itertools
import dateutil.parser
from django.core.management.base import BaseCommand, CommandError
from holviapp.importer import HolviImporter
from holviapp.utils import list_invoices, list_orders
def yesterday_proxy():
now_yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
start_yesterday = datetime.datetime.combine(now_yesterday.date(), datetime.datetime.min.time())
return start_yesterday.isoformat()
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
parser.add_argument('--all', action='store_true', help='Import all Holvi transactions (WARNING: this may take forever)')
parser.add_argument('since', type=str, nargs='?', default=yesterday_proxy(), help='Import transactions updated since datetime, defaults to yesterday midnight')
def handle(self, *args, **options):
if (not options['since']
and not options['all']):
raise CommandError('Either since or all must be specified')
invoice_filters = {}
order_filters = {}
if not options.get('all', False):
since_parsed = dateutil.parser.parse(options['since'])
print("Importing since %s" % since_parsed.isoformat())
invoice_filters['update_time_from'] = since_parsed.isoformat()
order_filters['filter_paid_time_from'] = since_parsed.isoformat()
h = HolviImporter(itertools.chain(list_invoices(**invoice_filters), list_orders(**order_filters)))
transactions = h.import_transactions()
for t in transactions:
print("Imported transaction %s" % t)
|
Add management command for importing Holvi Invoices and Orders
Invoices may have multiple payments, abstracttransaction is generated
for each payment line.
Orders are paid in one go (they're from the webshop), one transaction
is generated from an Order.# -*- coding: utf-8 -*-
import datetime
import itertools
import dateutil.parser
from django.core.management.base import BaseCommand, CommandError
from holviapp.importer import HolviImporter
from holviapp.utils import list_invoices, list_orders
def yesterday_proxy():
now_yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
start_yesterday = datetime.datetime.combine(now_yesterday.date(), datetime.datetime.min.time())
return start_yesterday.isoformat()
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
parser.add_argument('--all', action='store_true', help='Import all Holvi transactions (WARNING: this may take forever)')
parser.add_argument('since', type=str, nargs='?', default=yesterday_proxy(), help='Import transactions updated since datetime, defaults to yesterday midnight')
def handle(self, *args, **options):
if (not options['since']
and not options['all']):
raise CommandError('Either since or all must be specified')
invoice_filters = {}
order_filters = {}
if not options.get('all', False):
since_parsed = dateutil.parser.parse(options['since'])
print("Importing since %s" % since_parsed.isoformat())
invoice_filters['update_time_from'] = since_parsed.isoformat()
order_filters['filter_paid_time_from'] = since_parsed.isoformat()
h = HolviImporter(itertools.chain(list_invoices(**invoice_filters), list_orders(**order_filters)))
transactions = h.import_transactions()
for t in transactions:
print("Imported transaction %s" % t)
|
<commit_before><commit_msg>Add management command for importing Holvi Invoices and Orders
Invoices may have multiple payments, abstracttransaction is generated
for each payment line.
Orders are paid in one go (they're from the webshop), one transaction
is generated from an Order.<commit_after># -*- coding: utf-8 -*-
import datetime
import itertools
import dateutil.parser
from django.core.management.base import BaseCommand, CommandError
from holviapp.importer import HolviImporter
from holviapp.utils import list_invoices, list_orders
def yesterday_proxy():
now_yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
start_yesterday = datetime.datetime.combine(now_yesterday.date(), datetime.datetime.min.time())
return start_yesterday.isoformat()
class Command(BaseCommand):
help = 'Import transaction data from Holvi API'
def add_arguments(self, parser):
parser.add_argument('--all', action='store_true', help='Import all Holvi transactions (WARNING: this may take forever)')
parser.add_argument('since', type=str, nargs='?', default=yesterday_proxy(), help='Import transactions updated since datetime, defaults to yesterday midnight')
def handle(self, *args, **options):
if (not options['since']
and not options['all']):
raise CommandError('Either since or all must be specified')
invoice_filters = {}
order_filters = {}
if not options.get('all', False):
since_parsed = dateutil.parser.parse(options['since'])
print("Importing since %s" % since_parsed.isoformat())
invoice_filters['update_time_from'] = since_parsed.isoformat()
order_filters['filter_paid_time_from'] = since_parsed.isoformat()
h = HolviImporter(itertools.chain(list_invoices(**invoice_filters), list_orders(**order_filters)))
transactions = h.import_transactions()
for t in transactions:
print("Imported transaction %s" % t)
|
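A minimal, self-contained sketch of how the command's default "since" window and the Holvi filter keyword arguments above are built; it assumes only the standard library and python-dateutil and leaves HolviImporter untouched.
import datetime
import dateutil.parser
def yesterday_midnight_iso():
    # Same idea as yesterday_proxy(): midnight at the start of the previous day.
    now_yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
    start = datetime.datetime.combine(now_yesterday.date(), datetime.datetime.min.time())
    return start.isoformat()
since_parsed = dateutil.parser.parse(yesterday_midnight_iso())
# The same filter dicts the command hands to list_invoices / list_orders.
invoice_filters = {'update_time_from': since_parsed.isoformat()}
order_filters = {'filter_paid_time_from': since_parsed.isoformat()}
print(invoice_filters, order_filters)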
|
d942340fb5cfe8aa9aade11b3117b9848097c8a1
|
alerts/geomodel/journal.py
|
alerts/geomodel/journal.py
|
'''To make GeoModel code more testable, we abstract interaction with
ElasticSearch away via a "journal interface". This is just a function that,
called with an ES index and a list of `Entry`, stores the contained locality
state data in ElasticSearch.
'''
from typing import Callable, List, NamedTuple
from mozdef_util.elasticsearch_client import ElasticsearchClient as ESClient
from alerts.geomodel.locality import State
# TODO: Switch to dataclasses when we upgrade to Python 3.7+
class Entry(NamedTuple):
'''
'''
identifier: str
state: State
JournalInterface = Callable[[List[Entry], str], None]
def wrap(client: ESClient) -> JournalInterface:
'''Wrap an `ElasticsearchClient` in a closure of type `JournalInterface`.
'''
def wrapper(entries: List[Entry], esindex: str):
for entry in entries:
document = dict(entry.state._asdict())
client.save_object(
index=esindex,
body=document,
                doc_id=entry.identifier)
return wrapper
|
Write an abstraction for storing locality state in ES
|
Write an abstraction for storing locality state in ES
|
Python
|
mpl-2.0
|
jeffbryner/MozDef,mozilla/MozDef,mpurzynski/MozDef,jeffbryner/MozDef,mozilla/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,mozilla/MozDef,mozilla/MozDef,jeffbryner/MozDef,jeffbryner/MozDef
|
Write an abstraction for storing locality state in ES
|
'''To make GeoModel code more testable, we abstract interaction with
ElasticSearch away via a "journal interface". This is just a function that,
called with an ES index and a list of `Entry`, stores the contained locality
state data in ElasticSearch.
'''
from typing import Callable, List, NamedTuple
from mozdef_util.elasticsearch_client import ElasticsearchClient as ESClient
from alerts.geomodel.locality import State
# TODO: Switch to dataclasses when we upgrade to Python 3.7+
class Entry(NamedTuple):
'''
'''
identifier: str
state: State
JournalInterface = Callable[[List[Entry], str], None]
def wrap(client: ESClient) -> JournalInterface:
'''Wrap an `ElasticsearchClient` in a closure of type `JournalInterface`.
'''
def wrapper(entries: List[Entry], esindex: str):
for entry in entries:
document = dict(entry.state._asdict())
client.save_object(
index=esindex,
body=document,
                doc_id=entry.identifier)
return wrapper
|
<commit_before><commit_msg>Write an abstraction for storing locality state in ES<commit_after>
|
'''To make GeoModel code more testable, we abstract interaction with
ElasticSearch away via a "journal interface". This is just a function that,
called with an ES index and a list of `Entry`, stores the contained locality
state data in ElasticSearch.
'''
from typing import Callable, List, NamedTuple
from mozdef_util.elasticsearch_client import ElasticsearchClient as ESClient
from alerts.geomodel.locality import State
# TODO: Switch to dataclasses when we upgrade to Python 3.7+
class Entry(NamedTuple):
'''
'''
identifier: str
state: State
JournalInterface = Callable[[List[Entry], str], None]
def wrap(client: ESClient) -> JournalInterface:
'''Wrap an `ElasticsearchClient` in a closure of type `JournalInterface`.
'''
def wrapper(entries: List[Entry], esindex: str):
for entry in entries:
document = dict(entry.state._asdict())
client.save_object(
index=esindex,
body=document,
                doc_id=entry.identifier)
return wrapper
|
Write an abstraction for storing locality state in ES'''To make GeoModel code more testable, we abstract interaction with
ElasticSearch away via a "journal interface". This is just a function that,
called with an ES index and a list of `Entry`, stores the contained locality
state data in ElasticSearch.
'''
from typing import Callable, List, NamedTuple
from mozdef_util.elasticsearch_client import ElasticsearchClient as ESClient
from alerts.geomodel.locality import State
# TODO: Switch to dataclasses when we upgrade to Python 3.7+
class Entry(NamedTuple):
'''
'''
identifier: str
state: State
JournalInterface = Callable[[List[Entry], str], None]
def wrap(client: ESClient) -> JournalInterface:
'''Wrap an `ElasticsearchClient` in a closure of type `JournalInterface`.
'''
def wrapper(entries: List[Entry], esindex: str):
for entry in entries:
document = dict(entry.state._asdict())
client.save_object(
index=esindex,
body=document,
                doc_id=entry.identifier)
return wrapper
|
<commit_before><commit_msg>Write an abstraction for storing locality state in ES<commit_after>'''To make GeoModel code more testable, we abstract interaction with
ElasticSearch away via a "journal interface". This is just a function that,
called with an ES index and a list of `Entry`, stores the contained locality
state data in ElasticSearch.
'''
from typing import Callable, List, NamedTuple
from mozdef_util.elasticsearch_client import ElasticsearchClient as ESClient
from alerts.geomodel.locality import State
# TODO: Switch to dataclasses when we upgrade to Python 3.7+
class Entry(NamedTuple):
'''
'''
identifier: str
state: State
JournalInterface = Callable[[List[Entry], str], None]
def wrap(client: ESClient) -> JournalInterface:
'''Wrap an `ElasticsearchClient` in a closure of type `JournalInterface`.
'''
def wrapper(entries: List[Entry], esindex: str):
for entry in entries:
document = dict(entry.state._asdict())
client.save_object(
index=esindex,
body=document,
                doc_id=entry.identifier)
return wrapper
|
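A hypothetical usage sketch for the journal interface above: the stub client mirrors only the save_object(index, body, doc_id) call that wrap() relies on, and the state document shown is made up for illustration.
class FakeESClient(object):
    # Stand-in for the mozdef_util ElasticsearchClient; records calls instead of indexing.
    def __init__(self):
        self.saved = []
    def save_object(self, index, body, doc_id):
        self.saved.append((index, doc_id, body))
client = FakeESClient()
# With the module above imported, `journal = wrap(client)` would return a
# JournalInterface that issues one call like this per Entry it is given.
client.save_object(index='geomodel-state', body={'username': 'user1'}, doc_id='user1')
print(client.saved)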
|
b1f964e9725a18014de17d454bb733b7ad43cd38
|
pytac/write_to_file_readback_pvs.py
|
pytac/write_to_file_readback_pvs.py
|
import pytac.load_csv
import pytac.epics
def write_data_to_file(file_name, data):
fin = open(file_name, 'w')
for row in data:
fin.write('{0}\n'.format(row))
fin.close()
def get_readback_pvs(mode):
lattice = pytac.load_csv.load(mode, pytac.epics.EpicsControlSystem())
elements = lattice.get_elements()
readback_pvs = list()
# Get the readback pvs of all elements
for element in elements:
fields = element.get_fields()
for field in fields:
readback_pvs.append(element.get_pv_name(field, 'readback'))
return readback_pvs
def main():
readback_pvs = get_readback_pvs('VMX')
# Sort the result. It is required for comparison with the Matlab result.
readback_pvs = sorted(readback_pvs)
write_data_to_file('readback_pvs_py.txt', readback_pvs)
if __name__=='__main__':
main()
|
Write Pytac script to write all readback pvs to file
|
Write Pytac script to write all readback pvs to file
|
Python
|
apache-2.0
|
razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects
|
Write Pytac script to write all readback pvs to file
|
import pytac.load_csv
import pytac.epics
def write_data_to_file(file_name, data):
fin = open(file_name, 'w')
for row in data:
fin.write('{0}\n'.format(row))
fin.close()
def get_readback_pvs(mode):
lattice = pytac.load_csv.load(mode, pytac.epics.EpicsControlSystem())
elements = lattice.get_elements()
readback_pvs = list()
# Get the readback pvs of all elements
for element in elements:
fields = element.get_fields()
for field in fields:
readback_pvs.append(element.get_pv_name(field, 'readback'))
return readback_pvs
def main():
readback_pvs = get_readback_pvs('VMX')
# Sort the result. It is required for comparison with the Matlab result.
readback_pvs = sorted(readback_pvs)
write_data_to_file('readback_pvs_py.txt', readback_pvs)
if __name__=='__main__':
main()
|
<commit_before><commit_msg>Write Pytac script to write all readback pvs to file<commit_after>
|
import pytac.load_csv
import pytac.epics
def write_data_to_file(file_name, data):
fin = open(file_name, 'w')
for row in data:
fin.write('{0}\n'.format(row))
fin.close()
def get_readback_pvs(mode):
lattice = pytac.load_csv.load(mode, pytac.epics.EpicsControlSystem())
elements = lattice.get_elements()
readback_pvs = list()
# Get the readback pvs of all elements
for element in elements:
fields = element.get_fields()
for field in fields:
readback_pvs.append(element.get_pv_name(field, 'readback'))
return readback_pvs
def main():
readback_pvs = get_readback_pvs('VMX')
# Sort the result. It is required for comparison with the Matlab result.
readback_pvs = sorted(readback_pvs)
write_data_to_file('readback_pvs_py.txt', readback_pvs)
if __name__=='__main__':
main()
|
Write Pytac script to write all readback pvs to fileimport pytac.load_csv
import pytac.epics
def write_data_to_file(file_name, data):
fin = open(file_name, 'w')
for row in data:
fin.write('{0}\n'.format(row))
fin.close()
def get_readback_pvs(mode):
lattice = pytac.load_csv.load(mode, pytac.epics.EpicsControlSystem())
elements = lattice.get_elements()
readback_pvs = list()
# Get the readback pvs of all elements
for element in elements:
fields = element.get_fields()
for field in fields:
readback_pvs.append(element.get_pv_name(field, 'readback'))
return readback_pvs
def main():
readback_pvs = get_readback_pvs('VMX')
# Sort the result. It is required for comparison with the Matlab result.
readback_pvs = sorted(readback_pvs)
write_data_to_file('readback_pvs_py.txt', readback_pvs)
if __name__=='__main__':
main()
|
<commit_before><commit_msg>Write Pytac script to write all readback pvs to file<commit_after>import pytac.load_csv
import pytac.epics
def write_data_to_file(file_name, data):
fin = open(file_name, 'w')
for row in data:
fin.write('{0}\n'.format(row))
fin.close()
def get_readback_pvs(mode):
lattice = pytac.load_csv.load(mode, pytac.epics.EpicsControlSystem())
elements = lattice.get_elements()
readback_pvs = list()
# Get the readback pvs of all elements
for element in elements:
fields = element.get_fields()
for field in fields:
readback_pvs.append(element.get_pv_name(field, 'readback'))
return readback_pvs
def main():
readback_pvs = get_readback_pvs('VMX')
# Sort the result. It is required for comparison with the Matlab result.
readback_pvs = sorted(readback_pvs)
write_data_to_file('readback_pvs_py.txt', readback_pvs)
if __name__=='__main__':
main()
|
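A small, hypothetical follow-up sketch for the comparison mentioned in the commit message: read the Python and Matlab output files and check that they list the same PVs. The Matlab file name is an assumption.
def read_pv_list(path):
    # Return the sorted, non-empty lines of a PV listing.
    with open(path) as f:
        return sorted(line.strip() for line in f if line.strip())
# Uncomment once both files exist:
# print(read_pv_list('readback_pvs_py.txt') == read_pv_list('readback_pvs_matlab.txt'))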
|
0083a6fadad8bb0f202bab2af183a10f09e19459
|
piglow/demo_piglow.py
|
piglow/demo_piglow.py
|
from piglow import PiGlow
import time
def brighten_arm( arm ):
for i in range( 1, 10 ):
piglow.arm( arm, i )
time.sleep( 0.11 )
time.sleep( 0.5 )
piglow.arm( arm, 0 )
piglow = PiGlow()
piglow.all(0)
brighten_arm( 1 )
brighten_arm( 2 )
brighten_arm( 3 )
|
Add simple demo of piglow - lighting arms
|
Add simple demo of piglow - lighting arms
|
Python
|
mit
|
claremacrae/raspi_code,claremacrae/raspi_code,claremacrae/raspi_code
|
Add simple demo of piglow - lighting arms
|
from piglow import PiGlow
import time
def brighten_arm( arm ):
for i in range( 1, 10 ):
piglow.arm( arm, i )
time.sleep( 0.11 )
time.sleep( 0.5 )
piglow.arm( arm, 0 )
piglow = PiGlow()
piglow.all(0)
brighten_arm( 1 )
brighten_arm( 2 )
brighten_arm( 3 )
|
<commit_before><commit_msg>Add simple demo of piglow - lighting arms<commit_after>
|
from piglow import PiGlow
import time
def brighten_arm( arm ):
for i in range( 1, 10 ):
piglow.arm( arm, i )
time.sleep( 0.11 )
time.sleep( 0.5 )
piglow.arm( arm, 0 )
piglow = PiGlow()
piglow.all(0)
brighten_arm( 1 )
brighten_arm( 2 )
brighten_arm( 3 )
|
Add simple demo of piglow - lighting armsfrom piglow import PiGlow
import time
def brighten_arm( arm ):
for i in range( 1, 10 ):
piglow.arm( arm, i )
time.sleep( 0.11 )
time.sleep( 0.5 )
piglow.arm( arm, 0 )
piglow = PiGlow()
piglow.all(0)
brighten_arm( 1 )
brighten_arm( 2 )
brighten_arm( 3 )
|
<commit_before><commit_msg>Add simple demo of piglow - lighting arms<commit_after>from piglow import PiGlow
import time
def brighten_arm( arm ):
for i in range( 1, 10 ):
piglow.arm( arm, i )
time.sleep( 0.11 )
time.sleep( 0.5 )
piglow.arm( arm, 0 )
piglow = PiGlow()
piglow.all(0)
brighten_arm( 1 )
brighten_arm( 2 )
brighten_arm( 3 )
|
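A hardware-free sketch of the ramp the demo performs on each arm, brightness 1 to 9 with roughly 0.11 s between steps and a short hold before switching off; printing stands in for driving the LEDs.
import time
def ramp(levels=9, step_delay=0.11, hold=0.5):
    for level in range(1, levels + 1):
        print("arm brightness ->", level)  # piglow.arm(arm, level) on real hardware
        time.sleep(step_delay)
    time.sleep(hold)
    print("arm brightness -> 0")           # switch the arm off again
ramp()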
|
b3f8be5b6ab7e4e713004447a3cfbda743d80394
|
rules/management/commands/CorpusLogicUpdate.py
|
rules/management/commands/CorpusLogicUpdate.py
|
import logging
from django.core.management.base import BaseCommand, CommandError
from plyara import YaraParser
from rules.models import YaraRule
# Configure Logging
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
help = 'Recalculate the logic hashes of the entire rule corpus'
def handle(self, *args, **options):
corpus = YaraRule.objects.all()
rule_count = corpus.count()
message = 'Updating logic hashes for {} rules'.format(rule_count)
logging.info(message)
rule_index = 0
for rule in corpus.iterator():
rule_index += 1
logic_data = {'strings': rule.strings, 'condition_terms': rule.condition}
logic_hash = YaraParser.parserInterpreter.generateLogicHash(logic_data)
rule.logic_hash = logic_hash
rule.save()
logging.info('Rule Logic Update: {} of {}'.format(rule_index, rule_count))
|
Add management command to update corpus logic hashes
|
Add management command to update corpus logic hashes
|
Python
|
apache-2.0
|
PUNCH-Cyber/YaraGuardian,PUNCH-Cyber/YaraGuardian,PUNCH-Cyber/YaraGuardian,PUNCH-Cyber/YaraGuardian
|
Add management command to update corpus logic hashes
|
import logging
from django.core.management.base import BaseCommand, CommandError
from plyara import YaraParser
from rules.models import YaraRule
# Configure Logging
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
help = 'Recalculate the logic hashes of the entire rule corpus'
def handle(self, *args, **options):
corpus = YaraRule.objects.all()
rule_count = corpus.count()
message = 'Updating logic hashes for {} rules'.format(rule_count)
logging.info(message)
rule_index = 0
for rule in corpus.iterator():
rule_index += 1
logic_data = {'strings': rule.strings, 'condition_terms': rule.condition}
logic_hash = YaraParser.parserInterpreter.generateLogicHash(logic_data)
rule.logic_hash = logic_hash
rule.save()
logging.info('Rule Logic Update: {} of {}'.format(rule_index, rule_count))
|
<commit_before><commit_msg>Add management command to update corpus logic hashes<commit_after>
|
import logging
from django.core.management.base import BaseCommand, CommandError
from plyara import YaraParser
from rules.models import YaraRule
# Configure Logging
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
help = 'Recalculate the logic hashes of the entire rule corpus'
def handle(self, *args, **options):
corpus = YaraRule.objects.all()
rule_count = corpus.count()
message = 'Updating logic hashes for {} rules'.format(rule_count)
logging.info(message)
rule_index = 0
for rule in corpus.iterator():
rule_index += 1
logic_data = {'strings': rule.strings, 'condition_terms': rule.condition}
logic_hash = YaraParser.parserInterpreter.generateLogicHash(logic_data)
rule.logic_hash = logic_hash
rule.save()
logging.info('Rule Logic Update: {} of {}'.format(rule_index, rule_count))
|
Add management command to update corpus logic hashesimport logging
from django.core.management.base import BaseCommand, CommandError
from plyara import YaraParser
from rules.models import YaraRule
# Configure Logging
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
help = 'Recalculate the logic hashes of the entire rule corpus'
def handle(self, *args, **options):
corpus = YaraRule.objects.all()
rule_count = corpus.count()
message = 'Updating logic hashes for {} rules'.format(rule_count)
logging.info(message)
rule_index = 0
for rule in corpus.iterator():
rule_index += 1
logic_data = {'strings': rule.strings, 'condition_terms': rule.condition}
logic_hash = YaraParser.parserInterpreter.generateLogicHash(logic_data)
rule.logic_hash = logic_hash
rule.save()
logging.info('Rule Logic Update: {} of {}'.format(rule_index, rule_count))
|
<commit_before><commit_msg>Add management command to update corpus logic hashes<commit_after>import logging
from django.core.management.base import BaseCommand, CommandError
from plyara import YaraParser
from rules.models import YaraRule
# Configure Logging
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
help = 'Recalculate the logic hashes of the entire rule corpus'
def handle(self, *args, **options):
corpus = YaraRule.objects.all()
rule_count = corpus.count()
message = 'Updating logic hashes for {} rules'.format(rule_count)
logging.info(message)
rule_index = 0
for rule in corpus.iterator():
rule_index += 1
logic_data = {'strings': rule.strings, 'condition_terms': rule.condition}
logic_hash = YaraParser.parserInterpreter.generateLogicHash(logic_data)
rule.logic_hash = logic_hash
rule.save()
logging.info('Rule Logic Update: {} of {}'.format(rule_index, rule_count))
|
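A framework-free sketch of the same recalculation loop: iterate rule records, derive a hash from their strings and condition terms, and log progress. The real command delegates hashing to plyara's generateLogicHash; a plain SHA-256 over the serialised logic stands in here so the snippet is self-contained, and the rules themselves are made up.
import hashlib
import json
import logging
logging.basicConfig(level=logging.INFO)
rules = [
    {'strings': ['$a = "foo"'], 'condition': ['$a']},
    {'strings': ['$b = "bar"'], 'condition': ['$b']},
]
for index, rule in enumerate(rules, start=1):
    logic_data = {'strings': rule['strings'], 'condition_terms': rule['condition']}
    digest = hashlib.sha256(json.dumps(logic_data, sort_keys=True).encode()).hexdigest()
    logging.info('Rule Logic Update: %s of %s (%s)', index, len(rules), digest[:12])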
|
16f29bfc832a64accd6ef67c2140f70ea07f2f05
|
h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater_feature_extraction.py
|
h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater_feature_extraction.py
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
extracted = model.deepfeatures(frame, "pooling1_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 800, "extracted frame doesn't have 800 columns"
extracted = model.deepfeatures(frame, "activation2_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 500, "extracted frame doesn't have 500 columns"
h2o.remove_all()
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
Add PyUnit for deep feature extraction of a LeNet model with mxnet.
|
Add PyUnit for deep feature extraction of a LeNet model with mxnet.
|
Python
|
apache-2.0
|
michalkurka/h2o-3,mathemage/h2o-3,h2oai/h2o-3,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,spennihana/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,h2oai/h2o-dev,mathemage/h2o-3,h2oai/h2o-3,mathemage/h2o-3,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,mathemage/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,h2oai/h2o-dev
|
Add PyUnit for deep feature extraction of a LeNet model with mxnet.
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
extracted = model.deepfeatures(frame, "pooling1_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 800, "extracted frame doesn't have 800 columns"
extracted = model.deepfeatures(frame, "activation2_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 500, "extracted frame doesn't have 500 columns"
h2o.remove_all()
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
<commit_before><commit_msg>Add PyUnit for deep feature extraction of a LeNet model with mxnet.<commit_after>
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
extracted = model.deepfeatures(frame, "pooling1_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 800, "extracted frame doesn't have 800 columns"
extracted = model.deepfeatures(frame, "activation2_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 500, "extracted frame doesn't have 500 columns"
h2o.remove_all()
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
Add PyUnit for deep feature extraction of a LeNet model with mxnet.from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
extracted = model.deepfeatures(frame, "pooling1_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 800, "extracted frame doesn't have 800 columns"
extracted = model.deepfeatures(frame, "activation2_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 500, "extracted frame doesn't have 500 columns"
h2o.remove_all()
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
<commit_before><commit_msg>Add PyUnit for deep feature extraction of a LeNet model with mxnet.<commit_after>from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
if not H2ODeepWaterEstimator.available(): return
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
extracted = model.deepfeatures(frame, "pooling1_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 800, "extracted frame doesn't have 800 columns"
extracted = model.deepfeatures(frame, "activation2_output")
#print(extracted.describe())
print(extracted.ncols)
assert extracted.ncols == 500, "extracted frame doesn't have 500 columns"
h2o.remove_all()
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
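A tiny, H2O-free sketch of the shape checks the test makes: given the width of each extracted frame, assert the expected column counts for the two layers. The observed values are made up to show the passing case.
EXPECTED_WIDTHS = {'pooling1_output': 800, 'activation2_output': 500}
def check_widths(observed):
    for layer, want in EXPECTED_WIDTHS.items():
        got = observed[layer]
        assert got == want, "extracted frame for %s has %d columns, expected %d" % (layer, got, want)
check_widths({'pooling1_output': 800, 'activation2_output': 500})
print("extracted frame widths look right")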
|
5ee78767ebaa5c1bbceb7ce2c82fa6687169b0c2
|
codingame/medium/paranoid_android.py
|
codingame/medium/paranoid_android.py
|
class Elevator(object):
def __init__(self, floor, pos):
super(Elevator, self).__init__()
self.floor = floor
self.pos = pos
self.direction = None
def __str__(self):
return 'Elevator on floor %i (pos %i) with dir %s' % (self.floor, self.pos, self.direction)
class Game(object):
def __init__(self, nbFloors, width, exitFloor, exitPos, nbElevators):
super(Game, self).__init__()
self.nbFloors = nbFloors
self.width = width
self.exitFloor = exitFloor
self.exitPos = exitPos
self.nbElevators = nbElevators
self.elevators = [0] * nbFloors
def addElevators(self):
for _ in xrange(self.nbElevators):
# elevatorFloor: floor on which this elevator is found
# elevatorPos: position of the elevator on its floor
elevatorFloor, elevatorPos = [int(j) for j in raw_input().split()]
self.elevators[elevatorFloor] = Elevator(elevatorFloor, elevatorPos)
# Don't forget to add the elevator leading to the exit
self.elevators[self.exitFloor] = Elevator(self.exitFloor, self.exitPos)
def setElevatorsDirections(self):
for i in range(self.nbFloors - 1):
if (self.elevators[i].pos > self.elevators[i+1].pos):
self.elevators[i+1].direction = 'LEFT'
else:
self.elevators[i+1].direction = 'RIGHT'
# nbFloors: number of floors
# width: width of the area
# nbRounds: maximum number of rounds
# exitFloor: floor on which the exit is found
# exitPos: position of the exit on its floor
# nbTotalClones: number of generated clones
# nbAdditionalElevators: ignore (always zero)
# nbElevators: number of elevators
nbFloors, width, nbRounds, exitFloor, exitPos, nbTotalClones, nbAdditionalElevators, nbElevators = [int(i) for i in raw_input().split()]
game = Game(nbFloors, width, exitFloor, exitPos, nbElevators)
game.addElevators()
game.setElevatorsDirections()
firstRound = True
# Game loop
while True:
# cloneFloor: floor of the leading clone
# clonePos: position of the leading clone on its floor
# direction: direction of the leading clone: LEFT or RIGHT
cloneFloor, clonePos, direction = raw_input().split()
cloneFloor = int(cloneFloor)
clonePos = int(clonePos)
if firstRound:
firstRound = False
if (clonePos < game.elevators[0].pos):
game.elevators[0].direction = 'RIGHT'
else:
game.elevators[0].direction = 'LEFT'
if cloneFloor == -1:
print 'WAIT'
else:
if direction == game.elevators[cloneFloor].direction:
print 'WAIT'
else:
print 'BLOCK'
|
Add exercice The Paranoid Android
|
Add exercice The Paranoid Android
|
Python
|
mit
|
AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas
|
Add exercice The Paranoid Android
|
class Elevator(object):
def __init__(self, floor, pos):
super(Elevator, self).__init__()
self.floor = floor
self.pos = pos
self.direction = None
def __str__(self):
return 'Elevator on floor %i (pos %i) with dir %s' % (self.floor, self.pos, self.direction)
class Game(object):
def __init__(self, nbFloors, width, exitFloor, exitPos, nbElevators):
super(Game, self).__init__()
self.nbFloors = nbFloors
self.width = width
self.exitFloor = exitFloor
self.exitPos = exitPos
self.nbElevators = nbElevators
self.elevators = [0] * nbFloors
def addElevators(self):
for _ in xrange(self.nbElevators):
# elevatorFloor: floor on which this elevator is found
# elevatorPos: position of the elevator on its floor
elevatorFloor, elevatorPos = [int(j) for j in raw_input().split()]
self.elevators[elevatorFloor] = Elevator(elevatorFloor, elevatorPos)
# Don't forget to add the elevator leading to the exit
self.elevators[self.exitFloor] = Elevator(self.exitFloor, self.exitPos)
def setElevatorsDirections(self):
for i in range(self.nbFloors - 1):
if (self.elevators[i].pos > self.elevators[i+1].pos):
self.elevators[i+1].direction = 'LEFT'
else:
self.elevators[i+1].direction = 'RIGHT'
# nbFloors: number of floors
# width: width of the area
# nbRounds: maximum number of rounds
# exitFloor: floor on which the exit is found
# exitPos: position of the exit on its floor
# nbTotalClones: number of generated clones
# nbAdditionalElevators: ignore (always zero)
# nbElevators: number of elevators
nbFloors, width, nbRounds, exitFloor, exitPos, nbTotalClones, nbAdditionalElevators, nbElevators = [int(i) for i in raw_input().split()]
game = Game(nbFloors, width, exitFloor, exitPos, nbElevators)
game.addElevators()
game.setElevatorsDirections()
firstRound = True
# Game loop
while True:
# cloneFloor: floor of the leading clone
# clonePos: position of the leading clone on its floor
# direction: direction of the leading clone: LEFT or RIGHT
cloneFloor, clonePos, direction = raw_input().split()
cloneFloor = int(cloneFloor)
clonePos = int(clonePos)
if firstRound:
firstRound = False
if (clonePos < game.elevators[0].pos):
game.elevators[0].direction = 'RIGHT'
else:
game.elevators[0].direction = 'LEFT'
if cloneFloor == -1:
print 'WAIT'
else:
if direction == game.elevators[cloneFloor].direction:
print 'WAIT'
else:
print 'BLOCK'
|
<commit_before><commit_msg>Add exercice The Paranoid Android<commit_after>
|
class Elevator(object):
def __init__(self, floor, pos):
super(Elevator, self).__init__()
self.floor = floor
self.pos = pos
self.direction = None
def __str__(self):
return 'Elevator on floor %i (pos %i) with dir %s' % (self.floor, self.pos, self.direction)
class Game(object):
def __init__(self, nbFloors, width, exitFloor, exitPos, nbElevators):
super(Game, self).__init__()
self.nbFloors = nbFloors
self.width = width
self.exitFloor = exitFloor
self.exitPos = exitPos
self.nbElevators = nbElevators
self.elevators = [0] * nbFloors
def addElevators(self):
for _ in xrange(self.nbElevators):
# elevatorFloor: floor on which this elevator is found
# elevatorPos: position of the elevator on its floor
elevatorFloor, elevatorPos = [int(j) for j in raw_input().split()]
self.elevators[elevatorFloor] = Elevator(elevatorFloor, elevatorPos)
# Don't forget to add the elevator leading to the exit
self.elevators[self.exitFloor] = Elevator(self.exitFloor, self.exitPos)
def setElevatorsDirections(self):
for i in range(self.nbFloors - 1):
if (self.elevators[i].pos > self.elevators[i+1].pos):
self.elevators[i+1].direction = 'LEFT'
else:
self.elevators[i+1].direction = 'RIGHT'
# nbFloors: number of floors
# width: width of the area
# nbRounds: maximum number of rounds
# exitFloor: floor on which the exit is found
# exitPos: position of the exit on its floor
# nbTotalClones: number of generated clones
# nbAdditionalElevators: ignore (always zero)
# nbElevators: number of elevators
nbFloors, width, nbRounds, exitFloor, exitPos, nbTotalClones, nbAdditionalElevators, nbElevators = [int(i) for i in raw_input().split()]
game = Game(nbFloors, width, exitFloor, exitPos, nbElevators)
game.addElevators()
game.setElevatorsDirections()
firstRound = True
# Game loop
while True:
# cloneFloor: floor of the leading clone
# clonePos: position of the leading clone on its floor
# direction: direction of the leading clone: LEFT or RIGHT
cloneFloor, clonePos, direction = raw_input().split()
cloneFloor = int(cloneFloor)
clonePos = int(clonePos)
if firstRound:
firstRound = False
if (clonePos < game.elevators[0].pos):
game.elevators[0].direction = 'RIGHT'
else:
game.elevators[0].direction = 'LEFT'
if cloneFloor == -1:
print 'WAIT'
else:
if direction == game.elevators[cloneFloor].direction:
print 'WAIT'
else:
print 'BLOCK'
|
Add exercice The Paranoid Androidclass Elevator(object):
def __init__(self, floor, pos):
super(Elevator, self).__init__()
self.floor = floor
self.pos = pos
self.direction = None
def __str__(self):
return 'Elevator on floor %i (pos %i) with dir %s' % (self.floor, self.pos, self.direction)
class Game(object):
def __init__(self, nbFloors, width, exitFloor, exitPos, nbElevators):
super(Game, self).__init__()
self.nbFloors = nbFloors
self.width = width
self.exitFloor = exitFloor
self.exitPos = exitPos
self.nbElevators = nbElevators
self.elevators = [0] * nbFloors
def addElevators(self):
for _ in xrange(self.nbElevators):
# elevatorFloor: floor on which this elevator is found
# elevatorPos: position of the elevator on its floor
elevatorFloor, elevatorPos = [int(j) for j in raw_input().split()]
self.elevators[elevatorFloor] = Elevator(elevatorFloor, elevatorPos)
# Don't forget to add the elevator leading to the exit
self.elevators[self.exitFloor] = Elevator(self.exitFloor, self.exitPos)
def setElevatorsDirections(self):
for i in range(self.nbFloors - 1):
if (self.elevators[i].pos > self.elevators[i+1].pos):
self.elevators[i+1].direction = 'LEFT'
else:
self.elevators[i+1].direction = 'RIGHT'
# nbFloors: number of floors
# width: width of the area
# nbRounds: maximum number of rounds
# exitFloor: floor on which the exit is found
# exitPos: position of the exit on its floor
# nbTotalClones: number of generated clones
# nbAdditionalElevators: ignore (always zero)
# nbElevators: number of elevators
nbFloors, width, nbRounds, exitFloor, exitPos, nbTotalClones, nbAdditionalElevators, nbElevators = [int(i) for i in raw_input().split()]
game = Game(nbFloors, width, exitFloor, exitPos, nbElevators)
game.addElevators()
game.setElevatorsDirections()
firstRound = True
# Game loop
while True:
# cloneFloor: floor of the leading clone
# clonePos: position of the leading clone on its floor
# direction: direction of the leading clone: LEFT or RIGHT
cloneFloor, clonePos, direction = raw_input().split()
cloneFloor = int(cloneFloor)
clonePos = int(clonePos)
if firstRound:
firstRound = False
if (clonePos < game.elevators[0].pos):
game.elevators[0].direction = 'RIGHT'
else:
game.elevators[0].direction = 'LEFT'
if cloneFloor == -1:
print 'WAIT'
else:
if direction == game.elevators[cloneFloor].direction:
print 'WAIT'
else:
print 'BLOCK'
|
<commit_before><commit_msg>Add exercice The Paranoid Android<commit_after>class Elevator(object):
def __init__(self, floor, pos):
super(Elevator, self).__init__()
self.floor = floor
self.pos = pos
self.direction = None
def __str__(self):
return 'Elevator on floor %i (pos %i) with dir %s' % (self.floor, self.pos, self.direction)
class Game(object):
def __init__(self, nbFloors, width, exitFloor, exitPos, nbElevators):
super(Game, self).__init__()
self.nbFloors = nbFloors
self.width = width
self.exitFloor = exitFloor
self.exitPos = exitPos
self.nbElevators = nbElevators
self.elevators = [0] * nbFloors
def addElevators(self):
for _ in xrange(self.nbElevators):
# elevatorFloor: floor on which this elevator is found
# elevatorPos: position of the elevator on its floor
elevatorFloor, elevatorPos = [int(j) for j in raw_input().split()]
self.elevators[elevatorFloor] = Elevator(elevatorFloor, elevatorPos)
# Don't forget to add the elevator leading to the exit
self.elevators[self.exitFloor] = Elevator(self.exitFloor, self.exitPos)
def setElevatorsDirections(self):
for i in range(self.nbFloors - 1):
if (self.elevators[i].pos > self.elevators[i+1].pos):
self.elevators[i+1].direction = 'LEFT'
else:
self.elevators[i+1].direction = 'RIGHT'
# nbFloors: number of floors
# width: width of the area
# nbRounds: maximum number of rounds
# exitFloor: floor on which the exit is found
# exitPos: position of the exit on its floor
# nbTotalClones: number of generated clones
# nbAdditionalElevators: ignore (always zero)
# nbElevators: number of elevators
nbFloors, width, nbRounds, exitFloor, exitPos, nbTotalClones, nbAdditionalElevators, nbElevators = [int(i) for i in raw_input().split()]
game = Game(nbFloors, width, exitFloor, exitPos, nbElevators)
game.addElevators()
game.setElevatorsDirections()
firstRound = True
# Game loop
while True:
# cloneFloor: floor of the leading clone
# clonePos: position of the leading clone on its floor
# direction: direction of the leading clone: LEFT or RIGHT
cloneFloor, clonePos, direction = raw_input().split()
cloneFloor = int(cloneFloor)
clonePos = int(clonePos)
if firstRound:
firstRound = False
if (clonePos < game.elevators[0].pos):
game.elevators[0].direction = 'RIGHT'
else:
game.elevators[0].direction = 'LEFT'
if cloneFloor == -1:
print 'WAIT'
else:
if direction == game.elevators[cloneFloor].direction:
print 'WAIT'
else:
print 'BLOCK'
|
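A simplified, self-contained sketch of the per-round decision: leave the leading clone alone when it already walks towards the target elevator on its floor, block it otherwise. This condenses the precomputed per-floor directions above into one rule, and the toy elevator layout is an assumption.
ELEVATOR_POS = {0: 3, 1: 6, 2: 2}  # floor -> position of the elevator to reach (assumed)
def action(clone_floor, clone_pos, direction):
    if clone_floor == -1:
        return 'WAIT'
    wanted = 'RIGHT' if clone_pos < ELEVATOR_POS[clone_floor] else 'LEFT'
    return 'WAIT' if direction == wanted else 'BLOCK'
print(action(0, 1, 'RIGHT'))  # WAIT  - already heading for the elevator
print(action(0, 5, 'RIGHT'))  # BLOCK - walking away, turn the clone around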
|
114ea6c10658d2c199c68637d04bdd968fcc4452
|
voyager_tasks/test/test_info_files.py
|
voyager_tasks/test/test_info_files.py
|
import os
import sys
import glob
import json
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import voyager_tasks
class TestInfoFiles(unittest.TestCase):
"""Test case for checking info files exist
for each task and have a valid structure.
"""
@classmethod
def setUpClass(self):
self.tasks = set(voyager_tasks.__tasks__)
self.info_dir = os.path.abspath(os.path.join(os.path.dirname(os.getcwd()), '..', 'info'))
self.json_files = set([os.path.basename(f).split('.')[0] for f in glob.glob(os.path.join(self.info_dir, '*.info.json'))])
self.names = []
self.runner = set()
self.display = set()
files_to_test = self.json_files.intersection(self.tasks)
for name in files_to_test:
test_file = os.path.join(self.info_dir, '{0}.info.json'.format(name))
with open(test_file) as f:
d = json.load(f)
self.names.append(d['name'])
self.runner.add(d['runner'])
self.display.add(d['display'].keys()[0])
def test_json_exists(self):
"""Ensure an info.json file exists for each task"""
self.assertEqual(self.tasks.issubset(self.json_files), True)
def test_json_names(self):
"""Verify each info.json has a valid name field and value"""
self.assertEqual(sorted(list(self.tasks)), sorted(self.names))
def test_json_runner(self):
self.assertEqual(len(list(self.runner)) == 1 and list(self.runner)[0] == 'python', True)
def test_json_display(self):
"""Default display should be set to 'en' for all info.json files"""
self.assertEqual(len(list(self.display)) == 1 and list(self.display)[0] == 'en', True)
if __name__ == '__main__':
unittest.main()
|
Test case for task.info.json files
|
Test case for task.info.json files
Ensures all tasks have .info.json file and have a valid structure.
|
Python
|
apache-2.0
|
voyagersearch/voyager-py,voyagersearch/voyager-py
|
Test case for task.info.json files
Ensures all tasks have .info.json file and have a valid structure.
|
import os
import sys
import glob
import json
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import voyager_tasks
class TestInfoFiles(unittest.TestCase):
"""Test case for checking info files exist
for each task and have a valid structure.
"""
@classmethod
def setUpClass(self):
self.tasks = set(voyager_tasks.__tasks__)
self.info_dir = os.path.abspath(os.path.join(os.path.dirname(os.getcwd()), '..', 'info'))
self.json_files = set([os.path.basename(f).split('.')[0] for f in glob.glob(os.path.join(self.info_dir, '*.info.json'))])
self.names = []
self.runner = set()
self.display = set()
files_to_test = self.json_files.intersection(self.tasks)
for name in files_to_test:
test_file = os.path.join(self.info_dir, '{0}.info.json'.format(name))
with open(test_file) as f:
d = json.load(f)
self.names.append(d['name'])
self.runner.add(d['runner'])
self.display.add(d['display'].keys()[0])
def test_json_exists(self):
"""Ensure an info.json file exists for each task"""
self.assertEqual(self.tasks.issubset(self.json_files), True)
def test_json_names(self):
"""Verify each info.json has a valid name field and value"""
self.assertEqual(sorted(list(self.tasks)), sorted(self.names))
def test_json_runner(self):
self.assertEqual(len(list(self.runner)) == 1 and list(self.runner)[0] == 'python', True)
def test_json_display(self):
"""Default display should be set to 'en' for all info.json files"""
self.assertEqual(len(list(self.display)) == 1 and list(self.display)[0] == 'en', True)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test case for task.info.json files
Ensures all tasks have .info.json file and have a valid structure.<commit_after>
|
import os
import sys
import glob
import json
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import voyager_tasks
class TestInfoFiles(unittest.TestCase):
"""Test case for checking info files exist
for each task and have a valid structure.
"""
@classmethod
def setUpClass(self):
self.tasks = set(voyager_tasks.__tasks__)
self.info_dir = os.path.abspath(os.path.join(os.path.dirname(os.getcwd()), '..', 'info'))
self.json_files = set([os.path.basename(f).split('.')[0] for f in glob.glob(os.path.join(self.info_dir, '*.info.json'))])
self.names = []
self.runner = set()
self.display = set()
files_to_test = self.json_files.intersection(self.tasks)
for name in files_to_test:
test_file = os.path.join(self.info_dir, '{0}.info.json'.format(name))
with open(test_file) as f:
d = json.load(f)
self.names.append(d['name'])
self.runner.add(d['runner'])
self.display.add(d['display'].keys()[0])
def test_json_exists(self):
"""Ensure an info.json file exists for each task"""
self.assertEqual(self.tasks.issubset(self.json_files), True)
def test_json_names(self):
"""Verify each info.json has a valid name field and value"""
self.assertEqual(sorted(list(self.tasks)), sorted(self.names))
def test_json_runner(self):
self.assertEqual(len(list(self.runner)) == 1 and list(self.runner)[0] == 'python', True)
def test_json_display(self):
"""Default display should be set to 'en' for all info.json files"""
self.assertEqual(len(list(self.display)) == 1 and list(self.display)[0] == 'en', True)
if __name__ == '__main__':
unittest.main()
|
Test case for task.info.json files
Ensures all tasks have .info.json file and have a valid structure.import os
import sys
import glob
import json
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import voyager_tasks
class TestInfoFiles(unittest.TestCase):
"""Test case for checking info files exist
for each task and have a valid structure.
"""
@classmethod
def setUpClass(self):
self.tasks = set(voyager_tasks.__tasks__)
self.info_dir = os.path.abspath(os.path.join(os.path.dirname(os.getcwd()), '..', 'info'))
self.json_files = set([os.path.basename(f).split('.')[0] for f in glob.glob(os.path.join(self.info_dir, '*.info.json'))])
self.names = []
self.runner = set()
self.display = set()
files_to_test = self.json_files.intersection(self.tasks)
for name in files_to_test:
test_file = os.path.join(self.info_dir, '{0}.info.json'.format(name))
with open(test_file) as f:
d = json.load(f)
self.names.append(d['name'])
self.runner.add(d['runner'])
self.display.add(d['display'].keys()[0])
def test_json_exists(self):
"""Ensure an info.json file exists for each task"""
self.assertEqual(self.tasks.issubset(self.json_files), True)
def test_json_names(self):
"""Verify each info.json has a valid name field and value"""
self.assertEqual(sorted(list(self.tasks)), sorted(self.names))
def test_json_runner(self):
self.assertEqual(len(list(self.runner)) == 1 and list(self.runner)[0] == 'python', True)
def test_json_display(self):
"""Default display should be set to 'en' for all info.json files"""
self.assertEqual(len(list(self.display)) == 1 and list(self.display)[0] == 'en', True)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test case for task.info.json files
Ensures all tasks have .info.json file and have a valid structure.<commit_after>import os
import sys
import glob
import json
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import voyager_tasks
class TestInfoFiles(unittest.TestCase):
"""Test case for checking info files exist
for each task and have a valid structure.
"""
@classmethod
def setUpClass(self):
self.tasks = set(voyager_tasks.__tasks__)
self.info_dir = os.path.abspath(os.path.join(os.path.dirname(os.getcwd()), '..', 'info'))
self.json_files = set([os.path.basename(f).split('.')[0] for f in glob.glob(os.path.join(self.info_dir, '*.info.json'))])
self.names = []
self.runner = set()
self.display = set()
files_to_test = self.json_files.intersection(self.tasks)
for name in files_to_test:
test_file = os.path.join(self.info_dir, '{0}.info.json'.format(name))
with open(test_file) as f:
d = json.load(f)
self.names.append(d['name'])
self.runner.add(d['runner'])
self.display.add(d['display'].keys()[0])
def test_json_exists(self):
"""Ensure an info.json file exists for each task"""
self.assertEqual(self.tasks.issubset(self.json_files), True)
def test_json_names(self):
"""Verify each info.json has a valid name field and value"""
self.assertEqual(sorted(list(self.tasks)), sorted(self.names))
def test_json_runner(self):
self.assertEqual(len(list(self.runner)) == 1 and list(self.runner)[0] == 'python', True)
def test_json_display(self):
"""Default display should be set to 'en' for all info.json files"""
self.assertEqual(len(list(self.display)) == 1 and list(self.display)[0] == 'en', True)
if __name__ == '__main__':
unittest.main()
|
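A minimal sketch of the per-file structure checks, applied to an in-memory dict instead of the info directory; the dict's contents are made up for illustration.
example_info = {'name': 'sample_task', 'runner': 'python', 'display': {'en': {}}}
assert example_info['name'], "name field must be present and non-empty"
assert example_info['runner'] == 'python', "runner must be python"
assert list(example_info['display'].keys())[0] == 'en', "default display must be en"
print("info.json structure looks valid")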
|
ce9ac96a6f1e57ebbce162b7e097675c23f1f2f4
|
projects/jakub/gaussian_processes/gaussian_process_regression.py
|
projects/jakub/gaussian_processes/gaussian_process_regression.py
|
import csv
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.gaussian_process.kernels
kernel = (sklearn.gaussian_process.kernels.ConstantKernel()
+ sklearn.gaussian_process.kernels.Matern(length_scale=2, nu=3/2)
+ sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1))
LABEL_COL = 4
INPUT_COLS = 7, 9, 11, 13, 15
INPUT_DIM = len(INPUT_COLS)
INPUT_ROW_VALID = lambda row: row[2] == "Galaxy"
INPUT_SAMPLES_NUM = 1000
TESTING_SAMPLES_NUM = 1000
PLOT_SAMPLES = 1000
def take_samples(reader, num):
X = np.empty((num, INPUT_DIM))
y = np.empty((num,))
i = 0
for row in reader:
if INPUT_ROW_VALID(row):
y[i] = float(row[LABEL_COL])
for j, col in enumerate(INPUT_COLS):
X[i, j] = float(row[col])
i += 1
if i == num:
break
else:
raise Exception("Not enough samples in file.")
return X, y
def main(path):
with open(path) as f:
reader = csv.reader(f)
next(reader) # Skip headers
X, y = take_samples(reader, INPUT_SAMPLES_NUM)
test_X, test_y = take_samples(reader, TESTING_SAMPLES_NUM)
gp = sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel)
gp.fit(X, y)
if False:
        X_pred = np.empty((PLOT_SAMPLES, INPUT_DIM))
        X_pred[:, :4] = np.mean(X[:, :4], axis=0)
        X_pred[:, 4] = np.linspace(np.min(X[:, 4]), np.max(X[:, 4]), num=PLOT_SAMPLES)
y_pred, sigmas = gp.predict(X_pred, return_std=True)
plt.plot(X[:, 4], y, "ro", markersize=0.5)
plt.errorbar(X_pred[:, 4], y_pred, yerr=sigmas, capsize=0)
plt.show()
print("Score: {}".format(gp.score(test_X, test_y)))
if __name__ == '__main__':
if len(sys.argv) != 2:
raise ValueError()
main(sys.argv[1])
|
Implement simple gaussian process regression.
|
Implement simple gaussian process regression.
|
Python
|
bsd-3-clause
|
alasdairtran/mclearn,chengsoonong/mclass-sky,alasdairtran/mclearn,alasdairtran/mclearn,chengsoonong/mclass-sky,chengsoonong/mclass-sky,alasdairtran/mclearn,chengsoonong/mclass-sky
|
Implement simple gaussian process regression.
|
import csv
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.gaussian_process.kernels
kernel = (sklearn.gaussian_process.kernels.ConstantKernel()
+ sklearn.gaussian_process.kernels.Matern(length_scale=2, nu=3/2)
+ sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1))
LABEL_COL = 4
INPUT_COLS = 7, 9, 11, 13, 15
INPUT_DIM = len(INPUT_COLS)
INPUT_ROW_VALID = lambda row: row[2] == "Galaxy"
INPUT_SAMPLES_NUM = 1000
TESTING_SAMPLES_NUM = 1000
PLOT_SAMPLES = 1000
def take_samples(reader, num):
X = np.empty((num, INPUT_DIM))
y = np.empty((num,))
i = 0
for row in reader:
if INPUT_ROW_VALID(row):
y[i] = float(row[LABEL_COL])
for j, col in enumerate(INPUT_COLS):
X[i, j] = float(row[col])
i += 1
if i == num:
break
else:
raise Exception("Not enough samples in file.")
return X, y
def main(path):
with open(path) as f:
reader = csv.reader(f)
next(reader) # Skip headers
X, y = take_samples(reader, INPUT_SAMPLES_NUM)
test_X, test_y = take_samples(reader, TESTING_SAMPLES_NUM)
gp = sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel)
gp.fit(X, y)
if False:
        X_pred = np.empty((PLOT_SAMPLES, INPUT_DIM))
        X_pred[:, :4] = np.mean(X[:, :4], axis=0)
        X_pred[:, 4] = np.linspace(np.min(X[:, 4]), np.max(X[:, 4]), num=PLOT_SAMPLES)
y_pred, sigmas = gp.predict(X_pred, return_std=True)
plt.plot(X[:, 4], y, "ro", markersize=0.5)
plt.errorbar(X_pred[:, 4], y_pred, yerr=sigmas, capsize=0)
plt.show()
print("Score: {}".format(gp.score(test_X, test_y)))
if __name__ == '__main__':
if len(sys.argv) != 2:
raise ValueError()
main(sys.argv[1])
|
<commit_before><commit_msg>Implement simple gaussian process regression.<commit_after>
|
import csv
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.gaussian_process.kernels
kernel = (sklearn.gaussian_process.kernels.ConstantKernel()
+ sklearn.gaussian_process.kernels.Matern(length_scale=2, nu=3/2)
+ sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1))
LABEL_COL = 4
INPUT_COLS = 7, 9, 11, 13, 15
INPUT_DIM = len(INPUT_COLS)
INPUT_ROW_VALID = lambda row: row[2] == "Galaxy"
INPUT_SAMPLES_NUM = 1000
TESTING_SAMPLES_NUM = 1000
PLOT_SAMPLES = 1000
def take_samples(reader, num):
X = np.empty((num, INPUT_DIM))
y = np.empty((num,))
i = 0
for row in reader:
if INPUT_ROW_VALID(row):
y[i] = float(row[LABEL_COL])
for j, col in enumerate(INPUT_COLS):
X[i, j] = float(row[col])
i += 1
if i == num:
break
else:
raise Exception("Not enough samples in file.")
return X, y
def main(path):
with open(path) as f:
reader = csv.reader(f)
next(reader) # Skip headers
X, y = take_samples(reader, INPUT_SAMPLES_NUM)
test_X, test_y = take_samples(reader, TESTING_SAMPLES_NUM)
gp = sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel)
gp.fit(X, y)
if False:
        X_pred = np.empty((PLOT_SAMPLES, INPUT_DIM))
        X_pred[:, :4] = np.mean(X[:, :4], axis=0)
        X_pred[:, 4] = np.linspace(np.min(X[:, 4]), np.max(X[:, 4]), num=PLOT_SAMPLES)
y_pred, sigmas = gp.predict(X_pred, return_std=True)
plt.plot(X[:, 4], y, "ro", markersize=0.5)
plt.errorbar(X_pred[:, 4], y_pred, yerr=sigmas, capsize=0)
plt.show()
print("Score: {}".format(gp.score(test_X, test_y)))
if __name__ == '__main__':
if len(sys.argv) != 2:
raise ValueError()
main(sys.argv[1])
|
Implement simple gaussian process regression.import csv
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.gaussian_process.kernels
kernel = (sklearn.gaussian_process.kernels.ConstantKernel()
+ sklearn.gaussian_process.kernels.Matern(length_scale=2, nu=3/2)
+ sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1))
LABEL_COL = 4
INPUT_COLS = 7, 9, 11, 13, 15
INPUT_DIM = len(INPUT_COLS)
INPUT_ROW_VALID = lambda row: row[2] == "Galaxy"
INPUT_SAMPLES_NUM = 1000
TESTING_SAMPLES_NUM = 1000
PLOT_SAMPLES = 1000
def take_samples(reader, num):
X = np.empty((num, INPUT_DIM))
y = np.empty((num,))
i = 0
for row in reader:
if INPUT_ROW_VALID(row):
y[i] = float(row[LABEL_COL])
for j, col in enumerate(INPUT_COLS):
X[i, j] = float(row[col])
i += 1
if i == num:
break
else:
raise Exception("Not enough samples in file.")
return X, y
def main(path):
with open(path) as f:
reader = csv.reader(f)
next(reader) # Skip headers
X, y = take_samples(reader, INPUT_SAMPLES_NUM)
test_X, test_y = take_samples(reader, TESTING_SAMPLES_NUM)
gp = sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel)
gp.fit(X, y)
if False:
        X_pred = np.empty((PLOT_SAMPLES, INPUT_DIM))
        X_pred[:, :4] = np.mean(X[:, :4], axis=0)
        X_pred[:, 4] = np.linspace(np.min(X[:, 4]), np.max(X[:, 4]), num=PLOT_SAMPLES)
y_pred, sigmas = gp.predict(X_pred, return_std=True)
plt.plot(X[:, 4], y, "ro", markersize=0.5)
plt.errorbar(X_pred[:, 4], y_pred, yerr=sigmas, capsize=0)
plt.show()
print("Score: {}".format(gp.score(test_X, test_y)))
if __name__ == '__main__':
if len(sys.argv) != 2:
raise ValueError()
main(sys.argv[1])
|
<commit_before><commit_msg>Implement simple gaussian process regression.<commit_after>import csv
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.gaussian_process.kernels
kernel = (sklearn.gaussian_process.kernels.ConstantKernel()
+ sklearn.gaussian_process.kernels.Matern(length_scale=2, nu=3/2)
+ sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1))
LABEL_COL = 4
INPUT_COLS = 7, 9, 11, 13, 15
INPUT_DIM = len(INPUT_COLS)
INPUT_ROW_VALID = lambda row: row[2] == "Galaxy"
INPUT_SAMPLES_NUM = 1000
TESTING_SAMPLES_NUM = 1000
PLOT_SAMPLES = 1000
def take_samples(reader, num):
X = np.empty((num, INPUT_DIM))
y = np.empty((num,))
i = 0
for row in reader:
if INPUT_ROW_VALID(row):
y[i] = float(row[LABEL_COL])
for j, col in enumerate(INPUT_COLS):
X[i, j] = float(row[col])
i += 1
if i == num:
break
else:
raise Exception("Not enough samples in file.")
return X, y
def main(path):
with open(path) as f:
reader = csv.reader(f)
next(reader) # Skip headers
X, y = take_samples(reader, INPUT_SAMPLES_NUM)
test_X, test_y = take_samples(reader, TESTING_SAMPLES_NUM)
gp = sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel)
gp.fit(X, y)
if False:
        X_pred = np.empty((PLOT_SAMPLES, INPUT_DIM))
        X_pred[:, :4] = np.mean(X[:, :4], axis=0)
        X_pred[:, 4] = np.linspace(np.min(X[:, 4]), np.max(X[:, 4]), num=PLOT_SAMPLES)
y_pred, sigmas = gp.predict(X_pred, return_std=True)
plt.plot(X[:, 4], y, "ro", markersize=0.5)
plt.errorbar(X_pred[:, 4], y_pred, yerr=sigmas, capsize=0)
plt.show()
print("Score: {}".format(gp.score(test_X, test_y)))
if __name__ == '__main__':
if len(sys.argv) != 2:
raise ValueError()
main(sys.argv[1])
|
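A self-contained sketch of the same kernel composition fitted to synthetic 1-D data, showing the intended pipeline without the SDSS CSV file; the toy target function and sample counts are assumptions.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, Matern, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 10, size=(200, 1))
y = np.sin(X[:, 0]) + 0.1 * rng.randn(200)
# Same kernel family as above: constant + Matern(nu=3/2) + white noise.
kernel = ConstantKernel() + Matern(length_scale=2, nu=1.5) + WhiteKernel(noise_level=1)
gp = GaussianProcessRegressor(kernel=kernel)
gp.fit(X[:150], y[:150])
print("held-out R^2:", gp.score(X[150:], y[150:]))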
|
6c4edaefe30905f62b885b931a1c5ca6d65cd220
|
server/tests/models/test_project.py
|
server/tests/models/test_project.py
|
from server.models import Project
from server.tests.helpers import fixtures, FlaskTestCase
class TestProject(FlaskTestCase):
@fixtures('single_project.json')
def test_get_single_owner(self):
"""Test getting single project owner
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 1
@fixtures('many_owners.json')
def test_get_many_owners(self):
"""Test getting multiple project owners
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 3
@fixtures('single_contributer.json')
def test_get_single_contributer(self):
"""Test getting single contributer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 1
@fixtures('many_contributers.json')
def test_get_many_contributers(self):
"""Test getting many contributers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 3
@fixtures('single_designer.json')
def test_get_single_designer(self):
"""Test getting single designer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 1
@fixtures('many_designers.json')
def test_get_many_designers(self):
"""Test getting many designers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 2
|
Add tests for project model
|
Add tests for project model
|
Python
|
mit
|
ganemone/ontheside,ganemone/ontheside,ganemone/ontheside
|
Add tests for project model
|
from server.models import Project
from server.tests.helpers import fixtures, FlaskTestCase
class TestProject(FlaskTestCase):
@fixtures('single_project.json')
def test_get_single_owner(self):
"""Test getting single project owner
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 1
@fixtures('many_owners.json')
def test_get_many_owners(self):
"""Test getting multiple project owners
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 3
@fixtures('single_contributer.json')
def test_get_single_contributer(self):
"""Test getting single contributer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 1
@fixtures('many_contributers.json')
def test_get_many_contributers(self):
"""Test getting many contributers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 3
@fixtures('single_designer.json')
def test_get_single_designer(self):
"""Test getting single designer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 1
@fixtures('many_designers.json')
def test_get_many_designers(self):
"""Test getting many designers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 2
|
<commit_before><commit_msg>Add tests for project model<commit_after>
|
from server.models import Project
from server.tests.helpers import fixtures, FlaskTestCase
class TestProject(FlaskTestCase):
@fixtures('single_project.json')
def test_get_single_owner(self):
"""Test getting single project owner
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 1
@fixtures('many_owners.json')
def test_get_many_owners(self):
"""Test getting multiple project owners
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 3
@fixtures('single_contributer.json')
def test_get_single_contributer(self):
"""Test getting single contributer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 1
@fixtures('many_contributers.json')
def test_get_many_contributers(self):
"""Test getting many contributers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 3
@fixtures('single_designer.json')
def test_get_single_designer(self):
"""Test getting single designer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 1
@fixtures('many_designers.json')
def test_get_many_designers(self):
"""Test getting many designers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 2
|
Add tests for project modelfrom server.models import Project
from server.tests.helpers import fixtures, FlaskTestCase
class TestProject(FlaskTestCase):
@fixtures('single_project.json')
def test_get_single_owner(self):
"""Test getting single project owner
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 1
@fixtures('many_owners.json')
def test_get_many_owners(self):
"""Test getting multiple project owners
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 3
@fixtures('single_contributer.json')
def test_get_single_contributer(self):
"""Test getting single contributer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 1
@fixtures('many_contributers.json')
def test_get_many_contributers(self):
"""Test getting many contributers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 3
@fixtures('single_designer.json')
def test_get_single_designer(self):
"""Test getting single designer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 1
@fixtures('many_designers.json')
def test_get_many_designers(self):
"""Test getting many designers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 2
|
<commit_before><commit_msg>Add tests for project model<commit_after>from server.models import Project
from server.tests.helpers import fixtures, FlaskTestCase
class TestProject(FlaskTestCase):
@fixtures('single_project.json')
def test_get_single_owner(self):
"""Test getting single project owner
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 1
@fixtures('many_owners.json')
def test_get_many_owners(self):
"""Test getting multiple project owners
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
owners = project.get_owners()
assert len(owners) is 3
@fixtures('single_contributer.json')
def test_get_single_contributer(self):
"""Test getting single contributer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 1
@fixtures('many_contributers.json')
def test_get_many_contributers(self):
"""Test getting many contributers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
contributers = project.get_contributers()
assert len(contributers) is 3
@fixtures('single_designer.json')
def test_get_single_designer(self):
"""Test getting single designer
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 1
@fixtures('many_designers.json')
def test_get_many_designers(self):
"""Test getting many designers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 2
|
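The record above carries only the test module; the Project model, the fixtures decorator, and the JSON fixture files the tests load are not part of this record. Purely as a hedged sketch of the shape of code these tests could be exercising — assuming Flask-SQLAlchemy and a role-tagged user/project association; the UserProject table, column names, and role strings below are assumptions, not taken from the ganemone/ontheside source:

# Hypothetical sketch only: the real model behind these tests is not shown in this record.
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


class User(db.Model):
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)


class UserProject(db.Model):
    # Association row linking a user to a project with a role tag.
    __tablename__ = 'user_project'
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)
    project_id = db.Column(db.Integer, db.ForeignKey('project.id'), primary_key=True)
    role = db.Column(db.String(32))  # assumed values: 'owner', 'contributer', 'designer'
    user = db.relationship('User')


class Project(db.Model):
    __tablename__ = 'project'
    id = db.Column(db.Integer, primary_key=True)
    members = db.relationship('UserProject')

    def _users_with_role(self, role):
        # Filter this project's association rows down to one role.
        return [m.user for m in self.members if m.role == role]

    def get_owners(self):
        return self._users_with_role('owner')

    def get_contributers(self):
        return self._users_with_role('contributer')

    def get_designers(self):
        return self._users_with_role('designer')

Under that assumption, a fixture such as single_project.json would simply seed one Project row plus a single UserProject row whose role is 'owner', while many_owners.json would seed three such rows, matching the counts the tests assert.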