Each row pairs a commit with the file contents before and after the change. Column schema (lengths as reported by the dataset viewer):

| column | type |
|---|---|
| commit | stringlengths 40–40 |
| old_file | stringlengths 4–150 |
| new_file | stringlengths 4–150 |
| old_contents | stringlengths 0–3.26k |
| new_contents | stringlengths 1–4.43k |
| subject | stringlengths 15–501 |
| message | stringlengths 15–4.06k |
| lang | stringclasses (4 values) |
| license | stringclasses (13 values) |
| repos | stringlengths 5–91.5k |
| diff | stringlengths 0–4.35k |
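The rows below follow this schema directly. As a minimal sketch of inspecting them (assuming the rows are published as a Hugging Face dataset loadable with the `datasets` library; the repository id `user/commits-dataset` is a placeholder, not the real name):

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical dataset path: substitute the actual source of these rows.
ds = load_dataset("user/commits-dataset", split="train", streaming=True)

for row in ds.take(3):
    # Each row pairs a commit message with the file contents before and
    # after the change, plus a unified diff between the two.
    print(row["commit"][:8], row["old_file"], "-", row["subject"])
    print("\n".join(row["diff"].splitlines()[:5]))
```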
bad66654ee2a2688a4931dc88c617ea962c8cdf4
|
hairball/plugins/duplicate.py
|
hairball/plugins/duplicate.py
|
"""This module provides plugins for basic duplicate code detection."""
from hairball.plugins import HairballPlugin
class DuplicateChecks(HairballPlugin):
"""Plugin that keeps track of which scripts have been
used more than once whithin a project."""
def __init__(self):
super(DuplicateChecks, self).__init__()
self.total_duplicate = 0
self.list_duplicate = []
def finalize(self):
"""Output the duplicate scripts detected."""
if self.total_duplicate > 0:
print("%d duplicate scripts found" % self.total_duplicate)
for duplicate in self.list_duplicate:
print duplicate
def analyze(self, scratch):
"""Run and return the results from the DuplicateChecks plugin."""
scripts_set = set()
for script in self.iter_scripts(scratch):
blocks_list = []
for name, _, _ in self.iter_blocks(script.blocks):
blocks_list.append(name)
blocks_tuple = tuple(blocks_list)
if blocks_tuple in scripts_set:
if len(blocks_list)>3:
self.total_duplicate += 1
self.list_duplicate.append(blocks_list)
else:
scripts_set.add(blocks_tuple)
|
"""This module provides plugins for basic duplicate code detection."""
from hairball.plugins import HairballPlugin
class DuplicateScripts(HairballPlugin):
"""Plugin that keeps track of which scripts have been
used more than once whithin a project."""
def __init__(self):
super(DuplicateScripts, self).__init__()
self.total_duplicate = 0
self.list_duplicate = []
def finalize(self):
"""Output the duplicate scripts detected."""
if self.total_duplicate > 0:
print("%d duplicate scripts found" % self.total_duplicate)
for duplicate in self.list_duplicate:
print duplicate
def analyze(self, scratch):
"""Run and return the results from the DuplicateChecks plugin."""
scripts_set = set()
for script in self.iter_scripts(scratch):
blocks_list = []
for name, _, _ in self.iter_blocks(script.blocks):
blocks_list.append(name)
blocks_tuple = tuple(blocks_list)
if blocks_tuple in scripts_set:
if len(blocks_list)>3:
self.total_duplicate += 1
self.list_duplicate.append(blocks_list)
else:
scripts_set.add(blocks_tuple)
|
Change class name to DuplicateScripts
|
Change class name to DuplicateScripts
|
Python
|
bsd-2-clause
|
thsunmy/hairball,jemole/hairball,ucsb-cs-education/hairball,thsunmy/hairball,jemole/hairball,ucsb-cs-education/hairball
|
---
+++
@@ -2,13 +2,13 @@
 from hairball.plugins import HairballPlugin
 
 
-class DuplicateChecks(HairballPlugin):
+class DuplicateScripts(HairballPlugin):
 
     """Plugin that keeps track of which scripts have been
     used more than once whithin a project."""
 
     def __init__(self):
-        super(DuplicateChecks, self).__init__()
+        super(DuplicateScripts, self).__init__()
         self.total_duplicate = 0
         self.list_duplicate = []
|
ae74abbe809332a68c3e68f7ad19e0b1b2259f0f
|
tests/__init__.py
|
tests/__init__.py
|
"""Tests for running TopoFlow components in CMI."""
import os
def locate_topoflow(cache_dir):
for x in os.listdir(cache_dir):
if x.startswith('topoflow'):
return x
root_dir = '/home/csdms/wmt/topoflow.1'
cache_dir = os.path.join(root_dir, 'cache')
topoflow_dir = locate_topoflow(cache_dir)
example_dir = os.path.join(cache_dir, topoflow_dir,
'topoflow', 'examples', 'Treynor_Iowa')
|
"""Tests for running TopoFlow components in CMI."""
import os
def locate_topoflow(cache_dir):
for x in os.listdir(cache_dir):
if x.startswith('topoflow'):
return x
root_dir = '/home/csdms/wmt/topoflow.0'
cache_dir = os.path.join(root_dir, 'cache')
topoflow_dir = locate_topoflow(cache_dir)
example_dir = os.path.join(cache_dir, topoflow_dir,
'topoflow', 'examples', 'Treynor_Iowa')
|
Update root directory for tests
|
Update root directory for tests
|
Python
|
mit
|
Elchin/topoflow-cmi-testing,mdpiper/topoflow-cmi-testing
|
---
+++
@@ -8,7 +8,7 @@
             return x
 
 
-root_dir = '/home/csdms/wmt/topoflow.1'
+root_dir = '/home/csdms/wmt/topoflow.0'
 cache_dir = os.path.join(root_dir, 'cache')
 topoflow_dir = locate_topoflow(cache_dir)
 example_dir = os.path.join(cache_dir, topoflow_dir,
|
311549ce2dd126063b5e0b1e3476cbae78a4d6d5
|
tests/__init__.py
|
tests/__init__.py
|
from flexmock import flexmock
from flask.ext.storage import MockStorage
from flask_uploads import init

created_objects = []
added_objects = []
deleted_objects = []
committed_objects = []


class MockModel(object):
    def __init__(self, **kw):
        created_objects.append(self)
        for key, val in kw.iteritems():
            setattr(self, key, val)

db_mock = flexmock(
    Column=lambda *a, **kw: ('column', a, kw),
    Integer=('integer', [], {}),
    Unicode=lambda *a, **kw: ('unicode', a, kw),
    Model=MockModel,
    session=flexmock(
        add=added_objects.append,
        commit=lambda: committed_objects.extend(
            added_objects + deleted_objects
        ),
        delete=deleted_objects.append,
    ),
)


class TestCase(object):
    def setup_method(self, method, resizer=None):
        init(db_mock, MockStorage, resizer)
        self.db = db_mock
        self.Storage = MockStorage
        self.resizer = resizer
|
from flexmock import flexmock
from flask.ext.storage import MockStorage
from flask_uploads import init


class TestCase(object):
    added_objects = []
    committed_objects = []
    created_objects = []
    deleted_objects = []

    def setup_method(self, method, resizer=None):
        init(db_mock, MockStorage, resizer)
        self.db = db_mock
        self.Storage = MockStorage
        self.storage = MockStorage()
        self.resizer = resizer

    def teardown_method(self, method):
        # Empty the stacks.
        TestCase.added_objects[:] = []
        TestCase.committed_objects[:] = []
        TestCase.created_objects[:] = []
        TestCase.deleted_objects[:] = []


class MockModel(object):
    def __init__(self, **kw):
        TestCase.created_objects.append(self)
        for key, val in kw.iteritems():
            setattr(self, key, val)


db_mock = flexmock(
    Column=lambda *a, **kw: ('column', a, kw),
    Integer=('integer', [], {}),
    Unicode=lambda *a, **kw: ('unicode', a, kw),
    Model=MockModel,
    session=flexmock(
        add=TestCase.added_objects.append,
        commit=lambda: TestCase.committed_objects.extend(
            TestCase.added_objects + TestCase.deleted_objects
        ),
        delete=TestCase.deleted_objects.append,
    ),
)
|
Fix problems in test init.
|
Fix problems in test init.
|
Python
|
mit
|
FelixLoether/flask-uploads,FelixLoether/flask-image-upload-thing
|
---
+++
@@ -2,17 +2,34 @@
 from flask.ext.storage import MockStorage
 from flask_uploads import init
 
-created_objects = []
-added_objects = []
-deleted_objects = []
-committed_objects = []
+
+class TestCase(object):
+    added_objects = []
+    committed_objects = []
+    created_objects = []
+    deleted_objects = []
+
+    def setup_method(self, method, resizer=None):
+        init(db_mock, MockStorage, resizer)
+        self.db = db_mock
+        self.Storage = MockStorage
+        self.storage = MockStorage()
+        self.resizer = resizer
+
+    def teardown_method(self, method):
+        # Empty the stacks.
+        TestCase.added_objects[:] = []
+        TestCase.committed_objects[:] = []
+        TestCase.created_objects[:] = []
+        TestCase.deleted_objects[:] = []
 
 
 class MockModel(object):
     def __init__(self, **kw):
-        created_objects.append(self)
+        TestCase.created_objects.append(self)
         for key, val in kw.iteritems():
             setattr(self, key, val)
+
 
 db_mock = flexmock(
     Column=lambda *a, **kw: ('column', a, kw),
@@ -20,18 +37,10 @@
     Unicode=lambda *a, **kw: ('unicode', a, kw),
     Model=MockModel,
     session=flexmock(
-        add=added_objects.append,
-        commit=lambda: committed_objects.extend(
-            added_objects + deleted_objects
+        add=TestCase.added_objects.append,
+        commit=lambda: TestCase.committed_objects.extend(
+            TestCase.added_objects + TestCase.deleted_objects
         ),
-        delete=deleted_objects.append,
+        delete=TestCase.deleted_objects.append,
     ),
 )
-
-
-class TestCase(object):
-    def setup_method(self, method, resizer=None):
-        init(db_mock, MockStorage, resizer)
-        self.db = db_mock
-        self.Storage = MockStorage
-        self.resizer = resizer
|
d48a15c9585dfdb4441a0ca58041d064defe19b2
|
libs/utils.py
|
libs/utils.py
|
from django.core.cache import cache


def cache_get_key(*args, **kwargs):
    """Get the cache key for storage"""
    import hashlib
    serialise = []
    for arg in args:
        serialise.append(str(arg))
    for key,arg in kwargs.items():
        if key == "clear_cache":
            continue
        serialise.append(str(key))
        serialise.append(str(arg))
    key = hashlib.md5("".join(serialise)).hexdigest()
    return key


def cache_for(time):
    """Decorator for caching functions"""
    def decorator(fn):
        def wrapper(*args, **kwargs):
            key = cache_get_key(fn.__name__, *args, **kwargs)
            result = cache.get(key)
            if not result or "clear_cache" in kwargs and kwargs["clear_cache"]:
                cache.delete(key)
                result = fn(*args, **kwargs)
                cache.set(key, result, time)
            return result
        return wrapper
    return decorator
|
from django.core.cache import cache


def cache_get_key(*args, **kwargs):
    """Get the cache key for storage"""
    import hashlib
    serialise = []
    for arg in args:
        serialise.append(str(arg))
    for key,arg in kwargs.items():
        if key == "clear_cache":
            continue
        serialise.append(str(key))
        serialise.append(str(arg))
    key = hashlib.md5("".join(serialise)).hexdigest()
    return key


def cache_for(time):
    """Decorator for caching functions"""
    def decorator(fn):
        def wrapper(*args, **kwargs):
            key = cache_get_key(fn.__name__, *args, **kwargs)
            result = cache.get(key)
            if not result or "clear_cache" in kwargs and kwargs["clear_cache"]:
                cache.delete(key)
                result = fn(*args, **kwargs)
                cache.set(key, result, time)
            return result + "<!-- cache key: %s -->" % key
        return wrapper
    return decorator
|
Add cache key in HTML source
|
Add cache key in HTML source
|
Python
|
mit
|
daigotanaka/kawaraban,daigotanaka/kawaraban,daigotanaka/kawaraban,daigotanaka/kawaraban
|
---
+++
@@ -26,6 +26,6 @@
                 cache.delete(key)
                 result = fn(*args, **kwargs)
                 cache.set(key, result, time)
-            return result
+            return result + "<!-- cache key: %s -->" % key
         return wrapper
     return decorator
|
4612f10a8d4dcd0ec7133b12411387c74becbdb7
|
tests/__init__.py
|
tests/__init__.py
|
import sublime

import os
import os.path
import unittest


class CommandTestCase(unittest.TestCase):

    def setUp(self):
        self.project_data = {
            'code_search': {'csearchindex': 'test_csearchindex'},
            'folders': [{'path': '.'}]}
        sublime.active_window().run_command('new_window')
        self.window = sublime.active_window()
        self.window.set_project_data(self.project_data)
        self.view = self.window.new_file()

    def tearDown(self):
        self.view.set_scratch(True)
        self.window.focus_view(self.view)
        self.window.run_command('close_file')
        self.window.run_command('close_window')
        if os.path.isfile('test_csearchindex'):
            os.remove('test_csearchindex')
|
import sublime

import os
import os.path
import unittest


class CommandTestCase(unittest.TestCase):

    def setUp(self):
        path = '{0}/YetAnotherCodeSearch'.format(sublime.packages_path())
        self.project_data = {
            'code_search': {'csearchindex': 'test_csearchindex'},
            'folders': [{'path': path}]}
        sublime.active_window().run_command('new_window')
        self.window = sublime.active_window()
        self.window.set_project_data(self.project_data)
        self.view = self.window.new_file()

    def tearDown(self):
        self.view.set_scratch(True)
        self.window.focus_view(self.view)
        self.window.run_command('close_file')
        self.window.run_command('close_window')
        if os.path.isfile('test_csearchindex'):
            os.remove('test_csearchindex')
|
Set the test path to the project in Packages.
|
Set the test path to the project in Packages.
|
Python
|
mit
|
pope/SublimeYetAnotherCodeSearch,pope/SublimeYetAnotherCodeSearch
|
---
+++
@@ -8,9 +8,10 @@
 class CommandTestCase(unittest.TestCase):
 
     def setUp(self):
+        path = '{0}/YetAnotherCodeSearch'.format(sublime.packages_path())
         self.project_data = {
             'code_search': {'csearchindex': 'test_csearchindex'},
-            'folders': [{'path': '.'}]}
+            'folders': [{'path': path}]}
         sublime.active_window().run_command('new_window')
         self.window = sublime.active_window()
         self.window.set_project_data(self.project_data)
|
8a645abd1880fdac72e36f7366ae81fa13bf78ae
|
app/main/views/digital_outcomes_and_specialists.py
|
app/main/views/digital_outcomes_and_specialists.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from app import data_api_client
from flask import abort, render_template

from ...helpers.buyers_helpers import get_framework_and_lot
from ...main import main


@main.route('/buyers/frameworks/<framework_slug>/requirements/user-research-studios', methods=['GET'])
def studios_start_page(framework_slug):
    framework = data_api_client.get_framework(framework_slug)['frameworks']
    if framework['status'] != 'live':
        abort(404)
    return render_template(
        "buyers/studios_start_page.html"
    ), 200


@main.route('/buyers/frameworks/<framework_slug>/requirements/<lot_slug>', methods=['GET'])
def info_page_for_starting_a_brief(framework_slug, lot_slug):
    framework, lot = get_framework_and_lot(framework_slug, lot_slug, data_api_client,
                                           status='live', must_allow_brief=True)
    return render_template(
        "buyers/start_brief_info.html",
        framework=framework,
        lot=lot
    ), 200
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from app import data_api_client
from flask import abort, render_template

from ...helpers.buyers_helpers import get_framework_and_lot
from ...main import main


@main.route('/buyers/frameworks/<framework_slug>/requirements/user-research-studios', methods=['GET'])
def studios_start_page(framework_slug):
    # Check framework is live and has the user-research-studios lot
    get_framework_and_lot(framework_slug, 'user-research-studios', data_api_client, status='live')
    return render_template(
        "buyers/studios_start_page.html"
    ), 200


@main.route('/buyers/frameworks/<framework_slug>/requirements/<lot_slug>', methods=['GET'])
def info_page_for_starting_a_brief(framework_slug, lot_slug):
    framework, lot = get_framework_and_lot(framework_slug, lot_slug, data_api_client,
                                           status='live', must_allow_brief=True)
    return render_template(
        "buyers/start_brief_info.html",
        framework=framework,
        lot=lot
    ), 200
|
Check framework has the studios lot before showing start page
|
Check framework has the studios lot before showing start page
|
Python
|
mit
|
AusDTO/dto-digitalmarketplace-buyer-frontend,AusDTO/dto-digitalmarketplace-buyer-frontend,alphagov/digitalmarketplace-buyer-frontend,alphagov/digitalmarketplace-buyer-frontend,alphagov/digitalmarketplace-buyer-frontend,alphagov/digitalmarketplace-buyer-frontend,AusDTO/dto-digitalmarketplace-buyer-frontend,AusDTO/dto-digitalmarketplace-buyer-frontend
|
---
+++
@@ -9,9 +9,8 @@
 
 
 @main.route('/buyers/frameworks/<framework_slug>/requirements/user-research-studios', methods=['GET'])
 def studios_start_page(framework_slug):
-    framework = data_api_client.get_framework(framework_slug)['frameworks']
-    if framework['status'] != 'live':
-        abort(404)
+    # Check framework is live and has the user-research-studios lot
+    get_framework_and_lot(framework_slug, 'user-research-studios', data_api_client, status='live')
     return render_template(
         "buyers/studios_start_page.html"
|
993c4c98fb9529946669b4d13e6c5a9ff4ab3f67
|
tests/test_mpi.py
|
tests/test_mpi.py
|
from mpi4py import MPI

import pytest

from devito import Grid, Function, Distributor


@pytest.mark.parallel(nprocs=2)
def test_hello_mpi():
    size = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    name = MPI.Get_processor_name()

    print("Hello, World! I am rank %d of %d on %s" % (rank, size, name), flush=True)


@pytest.mark.parallel(nprocs=2)
def test_basic_partitioning():
    grid = Grid(shape=(10, 10, 10))  # Gonna use a default distributor underneath
    f = Function(name='f', grid=grid)
    from IPython import embed; embed()
|
from mpi4py import MPI

import pytest

from devito import Grid, Function


@pytest.mark.parallel(nprocs=2)
def test_hello_mpi():
    size = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    name = MPI.Get_processor_name()

    print("Hello, World! I am rank %d of %d on %s" % (rank, size, name), flush=True)


@pytest.mark.parallel(nprocs=[2, 4])
def test_basic_partitioning():
    grid = Grid(shape=(15, 15))  # Gonna use a default distributor underneath
    f = Function(name='f', grid=grid)

    distributor = grid._distributor
    expected = {  # nprocs -> [(rank0 shape), (rank1 shape), ...]
        2: [(8, 15), (7, 15)],
        4: [(8, 8), (8, 7), (7, 8), (7, 7)]
    }
    assert f.shape == expected[distributor.nprocs][distributor.rank]
|
Check domain decomposition over Functions
|
tests: Check domain decomposition over Functions
|
Python
|
mit
|
opesci/devito,opesci/devito
|
---
+++
@@ -2,7 +2,7 @@
 
 import pytest
 
-from devito import Grid, Function, Distributor
+from devito import Grid, Function
 
 
 @pytest.mark.parallel(nprocs=2)
@@ -14,8 +14,14 @@
 
     print("Hello, World! I am rank %d of %d on %s" % (rank, size, name), flush=True)
 
-@pytest.mark.parallel(nprocs=2)
+@pytest.mark.parallel(nprocs=[2, 4])
 def test_basic_partitioning():
-    grid = Grid(shape=(10, 10, 10))  # Gonna use a default distributor underneath
+    grid = Grid(shape=(15, 15))  # Gonna use a default distributor underneath
     f = Function(name='f', grid=grid)
-    from IPython import embed; embed()
+
+    distributor = grid._distributor
+    expected = {  # nprocs -> [(rank0 shape), (rank1 shape), ...]
+        2: [(8, 15), (7, 15)],
+        4: [(8, 8), (8, 7), (7, 8), (7, 7)]
+    }
+    assert f.shape == expected[distributor.nprocs][distributor.rank]
|
64a3526448b0e025bd75062a93d24c0072cbbf43
|
themint/server.py
|
themint/server.py
|
from flask import request, make_response
import json

from themint import app
from themint.service import message_service

@app.route('/', methods=['GET'])
def index():
    return "Mint OK"


# TODO remove <title_number> below, as it is not used.
@app.route('/titles/<title_number>', methods=['POST'])
def post(title_number):
    try:
        message_service.wrap_message_for_system_of_record(request.json)
        app.logger.info("Minting new title with payload %s" % (request.json))

        return make_response(
            json.dumps({
                'message': 'OK',
                'status_code': 201
            }),
            201)
    except Exception as e:
        app.logger.error('Error when minting new', exc_info=e)
        return make_response(
            json.dumps({
                'message': 'Error',
                'status_code': 400
            }),
            400)
|
from flask import request, make_response
import json

from themint import app
from themint.service import message_service

from datatypes.exceptions import DataDoesNotMatchSchemaException

@app.route('/', methods=['GET'])
def index():
    return "Mint OK"


# TODO remove <title_number> below, as it is not used.
@app.route('/titles/<title_number>', methods=['POST'])
def post(title_number):
    try:
        message_service.wrap_message_for_system_of_record(request.json)
        app.logger.info("Minting new title with payload %s" % (request.json))

        return make_response(
            json.dumps({
                'message': 'OK',
                'status_code': 201
            }),
            201)

    except DataDoesNotMatchSchemaException as e:
        app.logger.error('Validation error with data sent to mint %s' % e.field_errors)
        return make_response(
            json.dumps({
                'error': e.field_errors
            }), 400)

    except Exception as e:
        app.logger.error('Error when minting new', exc_info=e)
        return make_response(
            json.dumps({
                'message': 'Error',
                'status_code': 400
            }),
            400)
|
Return validation error messages to client
|
Return validation error messages to client
|
Python
|
mit
|
LandRegistry/mint-alpha,LandRegistry/mint-alpha
|
---
+++
@@ -3,6 +3,8 @@
 
 from themint import app
 from themint.service import message_service
+
+from datatypes.exceptions import DataDoesNotMatchSchemaException
 
 @app.route('/', methods=['GET'])
 def index():
@@ -19,9 +21,17 @@
         return make_response(
             json.dumps({
                 'message': 'OK',
-                'status_code': 201
+                'status_code': 201
             }),
             201)
+
+    except DataDoesNotMatchSchemaException as e:
+        app.logger.error('Validation error with data sent to mint %s' % e.field_errors)
+        return make_response(
+            json.dumps({
+                'error': e.field_errors
+            }), 400)
+
     except Exception as e:
         app.logger.error('Error when minting new', exc_info=e)
         return make_response(
|
654219bf00dc4a029d9e42779c3ad2d552948596
|
plim/extensions.py
|
plim/extensions.py
|
from docutils.core import publish_string

import coffeescript
from scss import Scss
from stylus import Stylus

from .util import as_unicode


def rst_to_html(source):
    # This code was taken from http://wiki.python.org/moin/ReStructuredText
    # You may also be interested in http://www.tele3.cz/jbar/rest/about.html
    html = publish_string(source=source, writer_name='html')
    return html[html.find('<body>')+6:html.find('</body>')].strip()


def coffee_to_js(source):
    return as_unicode('<script>{js}</script>').format(js=coffeescript.compile(source))


def scss_to_css(source):
    css = Scss().compile(source).strip()
    return as_unicode('<style>{css}</style>').format(css=css)


def stylus_to_css(source):
    compiler = Stylus(plugins={'nib':{}})
    return as_unicode('<style>{css}</style>').format(css=compiler.compile(source).strip())
|
from docutils.core import publish_parts

import coffeescript
from scss import Scss
from stylus import Stylus

from .util import as_unicode


def rst_to_html(source):
    # This code was taken from http://wiki.python.org/moin/ReStructuredText
    # You may also be interested in http://www.tele3.cz/jbar/rest/about.html
    html = publish_parts(source=source, writer_name='html')
    return html['html_body']


def coffee_to_js(source):
    return as_unicode('<script>{js}</script>').format(js=coffeescript.compile(source))


def scss_to_css(source):
    css = Scss().compile(source).strip()
    return as_unicode('<style>{css}</style>').format(css=css)


def stylus_to_css(source):
    compiler = Stylus(plugins={'nib':{}})
    return as_unicode('<style>{css}</style>').format(css=compiler.compile(source).strip())
|
Fix ReStructuredText extension in Python3 environment
|
Fix ReStructuredText extension in Python3 environment
|
Python
|
mit
|
kxxoling/Plim
|
---
+++
@@ -1,4 +1,4 @@
-from docutils.core import publish_string
+from docutils.core import publish_parts
 
 import coffeescript
 from scss import Scss
@@ -10,8 +10,8 @@
 def rst_to_html(source):
     # This code was taken from http://wiki.python.org/moin/ReStructuredText
     # You may also be interested in http://www.tele3.cz/jbar/rest/about.html
-    html = publish_string(source=source, writer_name='html')
-    return html[html.find('<body>')+6:html.find('</body>')].strip()
+    html = publish_parts(source=source, writer_name='html')
+    return html['html_body']
 
 
 def coffee_to_js(source):
|
8f0fb4c39e8c6fcfc4ee507933e935028e213ba9
|
ditto/twitter/migrations/0014_auto_20150819_1342.py
|
ditto/twitter/migrations/0014_auto_20150819_1342.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('twitter', '0013_user_favorites'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tweet',
            name='favorite_count',
            field=models.PositiveIntegerField(default=b'', help_text=b'Approximately how many times this had been favorited when fetched', blank=True),
        ),
        migrations.AlterField(
            model_name='tweet',
            name='retweet_count',
            field=models.PositiveIntegerField(default=b'', help_text=b'Number of times this had been retweeted when fetched', blank=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='url',
            field=models.URLField(default=b'', help_text=b'A URL provided by the user as part of their profile', blank=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('twitter', '0013_user_favorites'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tweet',
            name='favorite_count',
            field=models.PositiveIntegerField(default=0, help_text=b'Approximately how many times this had been favorited when fetched', blank=True),
        ),
        migrations.AlterField(
            model_name='tweet',
            name='retweet_count',
            field=models.PositiveIntegerField(default=0, help_text=b'Number of times this had been retweeted when fetched', blank=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='url',
            field=models.URLField(default=b'', help_text=b'A URL provided by the user as part of their profile', blank=True),
        ),
    ]
|
Fix broken default value in a Twitter migration
|
Fix broken default value in a Twitter migration
|
Python
|
mit
|
philgyford/django-ditto,philgyford/django-ditto,philgyford/django-ditto
|
---
+++
@@ -14,12 +14,12 @@
         migrations.AlterField(
             model_name='tweet',
             name='favorite_count',
-            field=models.PositiveIntegerField(default=b'', help_text=b'Approximately how many times this had been favorited when fetched', blank=True),
+            field=models.PositiveIntegerField(default=0, help_text=b'Approximately how many times this had been favorited when fetched', blank=True),
         ),
         migrations.AlterField(
             model_name='tweet',
             name='retweet_count',
-            field=models.PositiveIntegerField(default=b'', help_text=b'Number of times this had been retweeted when fetched', blank=True),
+            field=models.PositiveIntegerField(default=0, help_text=b'Number of times this had been retweeted when fetched', blank=True),
         ),
         migrations.AlterField(
             model_name='user',
|
7b33941dc14e2be4940a425107d668a8913eed53
|
ovp_users/serializers.py
|
ovp_users/serializers.py
|
from ovp_users import models

from rest_framework import serializers


class UserCreateSerializer(serializers.ModelSerializer):
    class Meta:
        model = models.User
        fields = ['name', 'email', 'password']


class UserSearchSerializer(serializers.ModelSerializer):
    class Meta:
        model = models.User
        fields = ['name']


class RecoveryTokenSerializer(serializers.Serializer):
    email = serializers.CharField(required=True)

    class Meta:
        fields = ['email']


class RecoverPasswordSerializer(serializers.Serializer):
    email = serializers.CharField(required=True)
    token = serializers.CharField(required=True)
    new_password = serializers.CharField(required=True)

    class Meta:
        fields = ['email', 'token', 'new_password']
|
from django.core.exceptions import ValidationError
from django.contrib.auth.password_validation import validate_password
from ovp_users import models

from rest_framework import serializers


class UserCreateSerializer(serializers.ModelSerializer):
    class Meta:
        model = models.User
        fields = ['name', 'email', 'password']

    def validate(self, data):
        password = data.get('password')

        errors = dict()
        try:
            validate_password(password=password)
            pass
        except ValidationError as e:
            errors['password'] = list(e.messages)

        if errors:
            raise serializers.ValidationError(errors)

        return super(UserCreateSerializer, self).validate(data)


class UserSearchSerializer(serializers.ModelSerializer):
    class Meta:
        model = models.User
        fields = ['name']


class RecoveryTokenSerializer(serializers.Serializer):
    email = serializers.CharField(required=True)

    class Meta:
        fields = ['email']


class RecoverPasswordSerializer(serializers.Serializer):
    email = serializers.CharField(required=True)
    token = serializers.CharField(required=True)
    new_password = serializers.CharField(required=True)

    class Meta:
        fields = ['email', 'token', 'new_password']
|
Validate user password on creation
|
Validate user password on creation
|
Python
|
agpl-3.0
|
OpenVolunteeringPlatform/django-ovp-users,OpenVolunteeringPlatform/django-ovp-users
|
---
+++
@@ -1,3 +1,5 @@
+from django.core.exceptions import ValidationError
+from django.contrib.auth.password_validation import validate_password
 from ovp_users import models
 
 from rest_framework import serializers
@@ -5,6 +7,21 @@
     class Meta:
         model = models.User
         fields = ['name', 'email', 'password']
+
+    def validate(self, data):
+        password = data.get('password')
+
+        errors = dict()
+        try:
+            validate_password(password=password)
+            pass
+        except ValidationError as e:
+            errors['password'] = list(e.messages)
+
+        if errors:
+            raise serializers.ValidationError(errors)
+
+        return super(UserCreateSerializer, self).validate(data)
 
 
 class UserSearchSerializer(serializers.ModelSerializer):
     class Meta:
|
5346741d0d5360cdf776252dcbe400ff839ab9fc
|
hs_core/tests/api/rest/test_resource_types.py
|
hs_core/tests/api/rest/test_resource_types.py
|
import json

from rest_framework.test import APIClient
from rest_framework import status
from rest_framework.test import APITestCase

from hs_core.hydroshare.utils import get_resource_types


class TestResourceTypes(APITestCase):

    def setUp(self):
        self.client = APIClient()

    def test_resource_typelist(self):
        resource_types = set([t.__name__ for t in get_resource_types()])

        response = self.client.get('/hsapi/resourceTypes/', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        content = json.loads(response.content)
        rest_resource_types = set([t['resource_type'] for t in content])

        self.assertEqual(resource_types, rest_resource_types)
|
import json

from rest_framework.test import APIClient
from rest_framework import status
from rest_framework.test import APITestCase


class TestResourceTypes(APITestCase):

    def setUp(self):
        self.client = APIClient()

        # Use a static list so that this test breaks when a resource type is
        # added or removed (so that the test can be updated)
        self.resource_types = {'GenericResource',
                               'RasterResource',
                               'RefTimeSeriesResource',
                               'TimeSeriesResource',
                               'NetcdfResource',
                               'ModelProgramResource',
                               'ModelInstanceResource',
                               'ToolResource',
                               'SWATModelInstanceResource',
                               'GeographicFeatureResource',
                               'ScriptResource'}

    def test_resource_typelist(self):
        response = self.client.get('/hsapi/resourceTypes/', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        content = json.loads(response.content)
        rest_resource_types = set([t['resource_type'] for t in content])

        self.assertEqual(self.resource_types, rest_resource_types)
|
Make resource type list static
|
Make resource type list static
|
Python
|
bsd-3-clause
|
FescueFungiShare/hydroshare,hydroshare/hydroshare,FescueFungiShare/hydroshare,hydroshare/hydroshare,ResearchSoftwareInstitute/MyHPOM,ResearchSoftwareInstitute/MyHPOM,FescueFungiShare/hydroshare,ResearchSoftwareInstitute/MyHPOM,RENCI/xDCIShare,RENCI/xDCIShare,FescueFungiShare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare,RENCI/xDCIShare,ResearchSoftwareInstitute/MyHPOM,RENCI/xDCIShare,FescueFungiShare/hydroshare,hydroshare/hydroshare,RENCI/xDCIShare,ResearchSoftwareInstitute/MyHPOM
|
---
+++
@@ -3,8 +3,6 @@
 from rest_framework.test import APIClient
 from rest_framework import status
 from rest_framework.test import APITestCase
-
-from hs_core.hydroshare.utils import get_resource_types
 
 
 class TestResourceTypes(APITestCase):
@@ -12,12 +10,24 @@
     def setUp(self):
         self.client = APIClient()
 
+        # Use a static list so that this test breaks when a resource type is
+        # added or removed (so that the test can be updated)
+        self.resource_types = {'GenericResource',
+                               'RasterResource',
+                               'RefTimeSeriesResource',
+                               'TimeSeriesResource',
+                               'NetcdfResource',
+                               'ModelProgramResource',
+                               'ModelInstanceResource',
+                               'ToolResource',
+                               'SWATModelInstanceResource',
+                               'GeographicFeatureResource',
+                               'ScriptResource'}
+
     def test_resource_typelist(self):
-        resource_types = set([t.__name__ for t in get_resource_types()])
-
         response = self.client.get('/hsapi/resourceTypes/', format='json')
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         content = json.loads(response.content)
         rest_resource_types = set([t['resource_type'] for t in content])
 
-        self.assertEqual(resource_types, rest_resource_types)
+        self.assertEqual(self.resource_types, rest_resource_types)
|
987c94a2a7d283ba4b231f332d0362f47c2e7a2a
|
BootstrapUpdater.py
|
BootstrapUpdater.py
|
#!/usr/bin/python3
# -*- coding: utf8 -*-

import os
import shutil
import subprocess

bootstrap_updater_version = 1

BootstrapDownloads = 'BootstrapDownloads/'
BootstrapPrograms = 'BootstrapPrograms/'

bootstrap = 'https://www.dropbox.com/s/0zhbgb1ftspcv9w/polygon4.zip?dl=1'
bootstrap_zip = BootstrapDownloads + 'bootstrap.zip'

_7z = BootstrapPrograms + '7za'
curl = BootstrapPrograms + 'curl'

def main():
    print_version()
    if os.path.exists(bootstrap_zip):
        shutil.copy(bootstrap_zip, bootstrap_zip + '.bak')
    download_file(bootstrap, bootstrap_zip)
    unpack_file(bootstrap_zip)

def download_file(url, file):
    print('Downloading file: ' + file)
    p = subprocess.Popen([curl, '-L', '-k', '-o', file, url])
    p.communicate()
    print()

def unpack_file(file):
    print('Unpacking file: ' + file)
    p = subprocess.Popen([_7z, 'x', '-y', file], stdout = subprocess.PIPE)
    p.communicate()

def print_version():
    print('Polygon-4 Bootstrap Updater Version ' + str(bootstrap_updater_version))
    print()

if __name__ == '__main__':
    main()
|
#!/usr/bin/python3
# -*- coding: utf8 -*-

import os
import shutil
import subprocess

bootstrap_updater_version = 1

BootstrapDownloads = 'BootstrapDownloads/'
BootstrapPrograms = 'BootstrapPrograms/'

bootstrap = 'https://www.dropbox.com/s/0zhbgb1ftspcv9w/polygon4.zip?dl=1'
bootstrap_zip = BootstrapDownloads + 'bootstrap.zip'

_7z = BootstrapPrograms + '7za'
curl = BootstrapPrograms + 'curl'

def main():
    print_version()
    if os.path.exists(BootstrapDownloads) == False:
        os.mkdir(BootstrapDownloads)
    if os.path.exists(bootstrap_zip):
        shutil.copy(bootstrap_zip, bootstrap_zip + '.bak')
    download_file(bootstrap, bootstrap_zip)
    unpack_file(bootstrap_zip)

def download_file(url, file):
    print('Downloading file: ' + file)
    p = subprocess.Popen([curl, '-L', '-k', '-o', file, url])
    p.communicate()
    print()

def unpack_file(file):
    print('Unpacking file: ' + file)
    p = subprocess.Popen([_7z, 'x', '-y', file], stdout = subprocess.PIPE)
    p.communicate()

def print_version():
    print('Polygon-4 Bootstrap Updater Version ' + str(bootstrap_updater_version))
    print()

if __name__ == '__main__':
    main()
|
Create download directory if not exists.
|
Create download directory if not exists.
|
Python
|
agpl-3.0
|
aimrebirth/BootstrapPy
|
---
+++
@@ -18,6 +18,8 @@
 
 def main():
     print_version()
+    if os.path.exists(BootstrapDownloads) == False:
+        os.mkdir(BootstrapDownloads)
     if os.path.exists(bootstrap_zip):
         shutil.copy(bootstrap_zip, bootstrap_zip + '.bak')
     download_file(bootstrap, bootstrap_zip)
|
776c1dbda3871c2b94d849ea59db25f93bb59525
|
src/mmw/apps/water_balance/views.py
|
src/mmw/apps/water_balance/views.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from django.shortcuts import render_to_response


def home_page(request):
    return render_to_response('home_page/index.html')
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from django.shortcuts import render_to_response
from django.template import RequestContext


def home_page(request):
    return render_to_response('home_page/index.html', RequestContext(request))
|
Add RequestContext to Micro site
|
Add RequestContext to Micro site

This allows us to populate settings variables such as Google Analytics
codes. See original work done for #769.

Refs #920
|
Python
|
apache-2.0
|
lliss/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,lliss/model-my-watershed,lliss/model-my-watershed,kdeloach/model-my-watershed,project-icp/bee-pollinator-app,kdeloach/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,lliss/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,project-icp/bee-pollinator-app,WikiWatershed/model-my-watershed,project-icp/bee-pollinator-app,kdeloach/model-my-watershed,lliss/model-my-watershed,project-icp/bee-pollinator-app
|
---
+++
@@ -4,7 +4,8 @@
 from __future__ import division
 
 from django.shortcuts import render_to_response
+from django.template import RequestContext
 
 
 def home_page(request):
-    return render_to_response('home_page/index.html')
+    return render_to_response('home_page/index.html', RequestContext(request))
|
670a72728ea7462972f3578b62cf33c5740187c2
|
locust/rpc/protocol.py
|
locust/rpc/protocol.py
|
import msgpack


class Message(object):
    def __init__(self, message_type, data, node_id):
        self.type = message_type
        self.data = data
        self.node_id = node_id

    def serialize(self):
        return msgpack.dumps((self.type, self.data, self.node_id))

    @classmethod
    def unserialize(cls, data):
        msg = cls(*msgpack.loads(data, raw=False))
        return msg
|
import msgpack


class Message(object):
    def __init__(self, message_type, data, node_id):
        self.type = message_type
        self.data = data
        self.node_id = node_id

    def __repr__(self):
        return "<Message %s:%s>" % (self.type, self.node_id)

    def serialize(self):
        return msgpack.dumps((self.type, self.data, self.node_id))

    @classmethod
    def unserialize(cls, data):
        msg = cls(*msgpack.loads(data, raw=False))
        return msg
|
Add Message.__repr__ for better debugging
|
Add Message.__repr__ for better debugging
|
Python
|
mit
|
mbeacom/locust,mbeacom/locust,locustio/locust,mbeacom/locust,locustio/locust,mbeacom/locust,locustio/locust,locustio/locust
|
---
+++
@@ -6,6 +6,9 @@
         self.type = message_type
         self.data = data
         self.node_id = node_id
+
+    def __repr__(self):
+        return "<Message %s:%s>" % (self.type, self.node_id)
 
     def serialize(self):
         return msgpack.dumps((self.type, self.data, self.node_id))
|
0286a26fc19b0474a45ecd4a8a6d0bb1e6afab02
|
tests/acceptance/response_test.py
|
tests/acceptance/response_test.py
|
from .request_test import test_app


def test_200_for_normal_response_validation():
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.enable_swagger_spec_validation': False,
        'pyramid_swagger.enable_response_validation': True,
    }
    test_app(settings).post_json(
        '/sample',
        {'foo': 'test', 'bar': 'test'},
        status=200
    )


def test_200_skip_validation_with_wrong_response():
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.skip_validation': '/(sample)\\b',
    }
    test_app(settings).get(
        '/sample/path_arg1/resource',
        params={'required_arg': 'test'},
        status=200
    )
|
from .request_test import test_app


def test_200_for_normal_response_validation():
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.enable_swagger_spec_validation': False,
        'pyramid_swagger.enable_response_validation': True,
    }
    test_app(settings).post_json(
        '/sample',
        {'foo': 'test', 'bar': 'test'},
        status=200
    )


def test_200_skip_validation_with_wrong_response():
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.skip_validation': '/(sample)\\b',
        'pyramid_swagger.enable_swagger_spec_validation': False,
    }
    test_app(settings).get(
        '/sample/path_arg1/resource',
        params={'required_arg': 'test'},
        status=200
    )
|
Disable spec validation on 3x
|
Disable spec validation on 3x
|
Python
|
bsd-3-clause
|
prat0318/pyramid_swagger,analogue/pyramid_swagger,striglia/pyramid_swagger,brianthelion/pyramid_swagger,striglia/pyramid_swagger
|
---
+++
@@ -18,6 +18,7 @@
     settings = {
         'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
         'pyramid_swagger.skip_validation': '/(sample)\\b',
+        'pyramid_swagger.enable_swagger_spec_validation': False,
     }
     test_app(settings).get(
         '/sample/path_arg1/resource',
|
828215d3de3ddd2febdd190de067b0f6e5c2e9e1
|
query/migrations/0017_auto_20160224_1306.py
|
query/migrations/0017_auto_20160224_1306.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations

from query.operations import engine_specific


class Migration(migrations.Migration):

    dependencies = [
        ('query', '0016_auto_20160203_1324'),
    ]

    operations = [
        engine_specific(('mysql',),
            migrations.RunSQL(
                'alter table query_term modify word varchar(200) character set utf8 collate utf8_bin;',
                'alter table query_term modify word varchar(200) character set utf8 collate utf8_general_ci;'
            )
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations

from query.operations import engine_specific


class Migration(migrations.Migration):

    dependencies = [
        ('query', '0016_auto_20160203_1324'),
    ]

    operations = [
        engine_specific(('mysql',),
            migrations.RunSQL(
                sql='alter table query_term modify word varchar(200) character set utf8 collate utf8_bin;',
                reverse_sql='alter table query_term modify word varchar(200) character set utf8 collate utf8_general_ci;'
            )
        ),
    ]
|
Use named arguments for RunSQL
|
Use named arguments for RunSQL
|
Python
|
apache-2.0
|
UUDigitalHumanitieslab/texcavator,UUDigitalHumanitieslab/texcavator,UUDigitalHumanitieslab/texcavator
|
---
+++
@@ -15,8 +15,8 @@
     operations = [
         engine_specific(('mysql',),
             migrations.RunSQL(
-                'alter table query_term modify word varchar(200) character set utf8 collate utf8_bin;',
-                'alter table query_term modify word varchar(200) character set utf8 collate utf8_general_ci;'
+                sql='alter table query_term modify word varchar(200) character set utf8 collate utf8_bin;',
+                reverse_sql='alter table query_term modify word varchar(200) character set utf8 collate utf8_general_ci;'
             )
         ),
     ]
|
d8dfdb68eae38f9126335e7123f39badafd73493
|
src/modules/prisjakt.py
|
src/modules/prisjakt.py
|
import asyncio
import json
import urllib.parse

import aiohttp

import waterbug


class Commands:

    @waterbug.expose
    class prisjakt:

        @waterbug.expose
        @asyncio.coroutine
        def search(responder, *line):
            qstring = urllib.parse.urlencode({
                "class": "Search_Supersearch",
                "method": "search",
                "skip_login": 1,
                "modes": "product",
                "limit": 3,
                "q": responder.line
            })
            url = "http://www.prisjakt.nu/ajax/server.php?{}".format(qstring)

            print("Running search")
            try:
                response = yield from asyncio.wait_for(aiohttp.request('GET', url), 5)
            except (asyncio.TimeoutError, aiohttp.HttpException):
                responder("Couldn't fetch result")
                return
            print("Ran search")

            body = json.loads((yield from response.read_and_close()).decode('utf-8'))

            for item in body['message']['product']['items']:
                responder("{name} ({price[display]}) - {url}".format(**item))

            if body['message']['product']['more_hits_available']:
                responder("More: http://www.prisjakt.nu/search.php?{}".format(
                    urllib.parse.urlencode({"s": responder.line})))
|
import asyncio
import json
import urllib.parse

import aiohttp

import waterbug


class Commands:

    @waterbug.expose
    class prisjakt:

        @waterbug.expose
        @asyncio.coroutine
        def search(responder, *line):
            qstring = urllib.parse.urlencode({
                "class": "Search_Supersearch",
                "method": "search",
                "skip_login": 1,
                "modes": "product",
                "limit": 3,
                "q": responder.line
            })
            url = "http://www.prisjakt.nu/ajax/server.php?{}".format(qstring)

            print("Running search")
            try:
                response = yield from asyncio.wait_for(aiohttp.request('GET', url), 5)
            except (asyncio.TimeoutError, aiohttp.HttpException):
                responder("Couldn't fetch result")
                return
            print("Ran search")

            body = json.loads((yield from response.read_and_close()).decode('utf-8'))
            product = body['message']['product']

            if len(product['items']) > 0:
                for item in body['message']['product']['items']:
                    responder("{name} ({price[display]}) - {url}".format(**item))

                if product['more_hits_available']:
                    responder("More: http://www.prisjakt.nu/search.php?{}".format(
                        urllib.parse.urlencode({"s": responder.line})))
            else:
                responder("No products found")
|
Print error in case of no results
|
Print error in case of no results
|
Python
|
agpl-3.0
|
BeholdMyGlory/waterbug
|
---
+++
@@ -34,10 +34,14 @@
             print("Ran search")
 
             body = json.loads((yield from response.read_and_close()).decode('utf-8'))
+            product = body['message']['product']
 
-            for item in body['message']['product']['items']:
-                responder("{name} ({price[display]}) - {url}".format(**item))
+            if len(product['items']) > 0:
+                for item in body['message']['product']['items']:
+                    responder("{name} ({price[display]}) - {url}".format(**item))
 
-            if body['message']['product']['more_hits_available']:
-                responder("More: http://www.prisjakt.nu/search.php?{}".format(
-                    urllib.parse.urlencode({"s": responder.line})))
+                if product['more_hits_available']:
+                    responder("More: http://www.prisjakt.nu/search.php?{}".format(
+                        urllib.parse.urlencode({"s": responder.line})))
+            else:
+                responder("No products found")
|
6a957fd279ed1b305879bcfa41515c2a6e6d423c
|
mediacloud/mediawords/util/perl.py
|
mediacloud/mediawords/util/perl.py
|
#
# Perl (Inline::Perl) helpers
#


# FIXME MC_REWRITE_TO_PYTHON: remove after porting all Perl code to Python
def decode_string_from_bytes_if_needed(string):
    """Convert 'bytes' string to 'unicode' if needed.
    (http://search.cpan.org/dist/Inline-Python/Python.pod#PORTING_YOUR_INLINE_PYTHON_CODE_FROM_2_TO_3)"""
    if string is not None:
        if isinstance(string, bytes):
            string = string.decode('utf-8')
    return string


# FIXME MC_REWRITE_TO_PYTHON: remove after porting all Perl code to Python
def decode_object_from_bytes_if_needed(obj):
    if isinstance(obj, dict):
        result = dict()
        for k, v in obj.items():
            k = decode_object_from_bytes_if_needed(k)
            v = decode_object_from_bytes_if_needed(v)
            result[k] = v
    elif isinstance(obj, list):
        result = list()
        for v in obj:
            v = decode_object_from_bytes_if_needed(v)
            result.append(v)
    else:
        result = decode_string_from_bytes_if_needed(obj)
    return result
|
#
# Perl (Inline::Perl) helpers
#


# FIXME MC_REWRITE_TO_PYTHON: remove after porting all Perl code to Python
def decode_string_from_bytes_if_needed(string):
    """Convert 'bytes' string to 'unicode' if needed.
    (http://search.cpan.org/dist/Inline-Python/Python.pod#PORTING_YOUR_INLINE_PYTHON_CODE_FROM_2_TO_3)"""
    if string is not None:
        if isinstance(string, bytes):
            string = string.decode('utf-8')
    return string


# FIXME MC_REWRITE_TO_PYTHON: remove after porting all Perl code to Python
def decode_object_from_bytes_if_needed(obj):
    """Convert object (dictionary, list or string) from 'bytes' string to 'unicode' if needed."""
    if isinstance(obj, dict):
        result = dict()
        for k, v in obj.items():
            k = decode_object_from_bytes_if_needed(k)
            v = decode_object_from_bytes_if_needed(v)
            result[k] = v
    elif isinstance(obj, list):
        result = list()
        for v in obj:
            v = decode_object_from_bytes_if_needed(v)
            result.append(v)
    else:
        result = decode_string_from_bytes_if_needed(obj)
    return result
|
Add comment describing what does the method do
|
Add comment describing what does the method do
|
Python
|
agpl-3.0
|
berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud
|
---
+++
@@ -15,6 +15,7 @@
 
 # FIXME MC_REWRITE_TO_PYTHON: remove after porting all Perl code to Python
 def decode_object_from_bytes_if_needed(obj):
+    """Convert object (dictionary, list or string) from 'bytes' string to 'unicode' if needed."""
     if isinstance(obj, dict):
         result = dict()
         for k, v in obj.items():
|
fb2f66adf5ba60d2cda934ef27125ce84057367e
|
PCbuild/rmpyc.py
|
PCbuild/rmpyc.py
|
# Remove all the .pyc and .pyo files under ../Lib.
def deltree(root):
    import os
    def rm(path):
        os.unlink(path)
    npyc = npyo = 0
    dirs = [root]
    while dirs:
        dir = dirs.pop()
        for short in os.listdir(dir):
            full = os.path.join(dir, short)
            if os.path.isdir(full):
                dirs.append(full)
            elif short.endswith(".pyc"):
                npyc += 1
                rm(full)
            elif short.endswith(".pyo"):
                npyo += 1
                rm(full)
    return npyc, npyo

npyc, npyo = deltree("../Lib")
print npyc, ".pyc deleted,", npyo, ".pyo deleted"
|
# Remove all the .pyc and .pyo files under ../Lib.

def deltree(root):
    import os
    from os.path import join

    npyc = npyo = 0
    for root, dirs, files in os.walk(root):
        for name in files:
            delete = False
            if name.endswith('.pyc'):
                delete = True
                npyc += 1
            elif name.endswith('.pyo'):
                delete = True
                npyo += 1

            if delete:
                os.remove(join(root, name))

    return npyc, npyo

npyc, npyo = deltree("../Lib")
print npyc, ".pyc deleted,", npyo, ".pyo deleted"
|
Use os.walk() to find files to delete.
|
Use os.walk() to find files to delete.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
---
+++
@@ -1,23 +1,24 @@
 # Remove all the .pyc and .pyo files under ../Lib.
+
 def deltree(root):
     import os
-    def rm(path):
-        os.unlink(path)
+    from os.path import join
+
     npyc = npyo = 0
-    dirs = [root]
-    while dirs:
-        dir = dirs.pop()
-        for short in os.listdir(dir):
-            full = os.path.join(dir, short)
-            if os.path.isdir(full):
-                dirs.append(full)
-            elif short.endswith(".pyc"):
+    for root, dirs, files in os.walk(root):
+        for name in files:
+            delete = False
+            if name.endswith('.pyc'):
+                delete = True
                 npyc += 1
-                rm(full)
-            elif short.endswith(".pyo"):
+            elif name.endswith('.pyo'):
+                delete = True
                 npyo += 1
-                rm(full)
+
+            if delete:
+                os.remove(join(root, name))
+
     return npyc, npyo
 
 npyc, npyo = deltree("../Lib")
|
d301cbeb4e6f248ed137a9d1a6b6f39558231cc3
|
tests/functional/test_vcs_mercurial.py
|
tests/functional/test_vcs_mercurial.py
|
from pip._internal.vcs.mercurial import Mercurial

from tests.lib import _create_test_package

def test_get_repository_root(script):
    version_pkg_path = _create_test_package(script, vcs="hg")
    tests_path = version_pkg_path.joinpath("tests")
    tests_path.mkdir()

    root1 = Mercurial.get_repository_root(version_pkg_path)
    assert root1 == version_pkg_path

    root2 = Mercurial.get_repository_root(version_pkg_path.joinpath("tests"))
    assert root2 == version_pkg_path
|
from pip._internal.vcs.mercurial import Mercurial

from tests.lib import _create_test_package, need_mercurial

@need_mercurial
def test_get_repository_root(script):
    version_pkg_path = _create_test_package(script, vcs="hg")
    tests_path = version_pkg_path.joinpath("tests")
    tests_path.mkdir()

    root1 = Mercurial.get_repository_root(version_pkg_path)
    assert root1 == version_pkg_path

    root2 = Mercurial.get_repository_root(version_pkg_path.joinpath("tests"))
    assert root2 == version_pkg_path
|
Add marker to Mercurial test
|
Add marker to Mercurial test
|
Python
|
mit
|
pradyunsg/pip,pfmoore/pip,pradyunsg/pip,pypa/pip,pfmoore/pip,pypa/pip,sbidoul/pip,sbidoul/pip
|
---
+++
@@ -1,7 +1,8 @@
 from pip._internal.vcs.mercurial import Mercurial
 
-from tests.lib import _create_test_package
+from tests.lib import _create_test_package, need_mercurial
 
+@need_mercurial
 def test_get_repository_root(script):
     version_pkg_path = _create_test_package(script, vcs="hg")
     tests_path = version_pkg_path.joinpath("tests")
|
7c1886cf8751281e1b41e80341a045b46c2c38f5
|
querylist/betterdict.py
|
querylist/betterdict.py
|
# Attribute prefix for allowing dotlookups when keys conflict with dict
# attributes.
PREFIX = '_bd_'


class BetterDict(dict):
    def __init__(self, *args, **kwargs):
        # Prefix that will be appended to keys for dot lookups that would
        # otherwise conflict with dict attributes.
        self.__prefix = PREFIX

        # Determine the attributes a dictionary has. We use this to prevent
        # exposing properties that conflict with a dict's properties
        self.__dict_attrs = dir(dict)

        return super(BetterDict, self).__init__(*args, **kwargs)

    def __getattr__(self, attr):
        # Don't attempt lookups for things that conflict with dict attrs
        if attr in self.__dict_attrs:
            raise AttributeError

        # If the requested attribute is prefixed with self.__prefix,
        # we need to unprefix it and do a lookup for that key.
        if attr.startswith(self.__prefix) and attr != self.__prefix:
            unprefixed_attr = attr.partition(self.__prefix)[-1]
            if unprefixed_attr in self and (
                    unprefixed_attr in self.__dict_attrs or
                    unprefixed_attr.startswith(self.__prefix)):
                return self.__dict_to_BetterDict(self[unprefixed_attr])

        if attr in self:
            return self.__dict_to_BetterDict(self[attr])

        raise AttributeError

    def __dict_to_BetterDict(self, value):
        """Convert the passed value to a BetterDict if the value is a dict"""
        return BetterDict(value) if type(value) == dict else value
|
# Attribute prefix for allowing dotlookups when keys conflict with dict
# attributes.
PREFIX = '_bd_'


class BetterDict(dict):
    def __init__(self, *args, **kwargs):
        # Prefix that will be appended to keys for dot lookups that would
        # otherwise conflict with dict attributes.
        self.__prefix = PREFIX

        # Determine the attributes a dictionary has. We use this to prevent
        # exposing properties that conflict with a dict's properties
        self.__dict_attrs = dir(dict)

        return super(BetterDict, self).__init__(*args, **kwargs)

    def __getattr__(self, attr):
        # If the requested attribute is prefixed with self.__prefix,
        # we need to unprefix it and do a lookup for that key.
        if attr.startswith(self.__prefix) and attr != self.__prefix:
            unprefixed_attr = attr.partition(self.__prefix)[-1]
            if unprefixed_attr in self and (
                    unprefixed_attr in self.__dict_attrs or
                    unprefixed_attr.startswith(self.__prefix)):
                return self.__dict_to_BetterDict(self[unprefixed_attr])

        if attr in self:
            return self.__dict_to_BetterDict(self[attr])

        raise AttributeError

    def __dict_to_BetterDict(self, value):
        """Convert the passed value to a BetterDict if the value is a dict"""
        return BetterDict(value) if type(value) == dict else value
|
Remove unnecessary check for dict attr conflicts.
|
Remove unnecessary check for dict attr conflicts.
|
Python
|
mit
|
thomasw/querylist,zoidbergwill/querylist
|
---
+++
@@ -16,10 +16,6 @@
         return super(BetterDict, self).__init__(*args, **kwargs)
 
     def __getattr__(self, attr):
-        # Don't attempt lookups for things that conflict with dict attrs
-        if attr in self.__dict_attrs:
-            raise AttributeError
-
         # If the requested attribute is prefixed with self.__prefix,
         # we need to unprefix it and do a lookup for that key.
         if attr.startswith(self.__prefix) and attr != self.__prefix:
|
3f54454c2eec9378d7bef836f37967c044f88faa
|
django_olcc/olcc/management/commands/olccimport.py
|
django_olcc/olcc/management/commands/olccimport.py
|
import os
import xlrd
from django.core.management.base import BaseCommand, CommandError

class Command(BaseCommand):
    """
    """
    args = "<filename>"
    help = "Parses an excel document of OLCC price data."

    def handle(self, *args, **options):
        try:
            filename = args[0]
            if not filename:
                # Get latest file and hash for update
                # ...
                pass

            self.stdout.write("Importing from \"%s\"...\n" % filename)

            # Import workbook
            wb = xlrd.open_workbook(filename);
            self.stdout.write("Sheet Names:\n%s\n" % wb.sheet_names())

            # Get the first sheet
            sheet = wb.sheet_by_index(0)

            # Loop over worksheet
            #[u'Report Date: ', '', 40863.0, '', '', '']
            #['', '', '', '', '', '']
            #[u'Item Code', u'Item Status', u'Description', u'Size', u'Bottles per Case', u'Bottle Price']
            #[u'0102B', u'@', u'GLENFIDDICH SNOW PHOENIX', u'750 ML', 6.0, 92.950000000000003]
            for rownum in range(sheet.nrows):
                print sheet.row_values(rownum)
        except IOError as (errno, strerror):
            raise CommandError("%s" % strerror)
        except IndexError:
            raise CommandError("You must specify a filename!")
|
import os
import xlrd

from django.core.management.base import BaseCommand, CommandError

from olcc.models import Product

class Command(BaseCommand):
    """
    :todo: Use optparse to add a --quiet option to supress all output except errors.
    :todo: Write a separate management command to fetch the latest price document.
    """
    args = "<filename>"
    help = "Parses an excel document of OLCC price data."

    def handle(self, *args, **options):
        try:
            filename = args[0]
            if not filename:
                # Get latest file and hash for update
                # ...
                pass

            self.stdout.write("Importing from \"%s\"...\n" % filename)

            # Import workbook
            wb = xlrd.open_workbook(filename)
            self.stdout.write("Sheet Names:\n%s\n" % wb.sheet_names())

            # Get the first sheet
            sheet = wb.sheet_by_index(0)

            # Loop over worksheet
            for rownum in range(sheet.nrows):
                values = sheet.row_values(rownum)
                if len(values) > 0:
                    try:
                        # TODO: Updating products
                        Product.from_row(values)
                    except IndexError:
                        pass
        except IOError as (errno, strerror):
            raise CommandError("%s" % strerror)
        except IndexError:
            raise CommandError("You must specify a filename!")
|
Clean up the management command stub in preparation of further development.
|
Clean up the management command stub in preparation of further development.
|
Python
|
mit
|
twaddington/django-olcc,twaddington/django-olcc,twaddington/django-olcc
|
---
+++
@@ -1,9 +1,14 @@
 import os
 import xlrd
+
 from django.core.management.base import BaseCommand, CommandError
+
+from olcc.models import Product
 
 class Command(BaseCommand):
     """
+    :todo: Use optparse to add a --quiet option to supress all output except errors.
+    :todo: Write a separate management command to fetch the latest price document.
     """
     args = "<filename>"
     help = "Parses an excel document of OLCC price data."
@@ -18,19 +23,21 @@
             self.stdout.write("Importing from \"%s\"...\n" % filename)
 
             # Import workbook
-            wb = xlrd.open_workbook(filename);
+            wb = xlrd.open_workbook(filename)
             self.stdout.write("Sheet Names:\n%s\n" % wb.sheet_names())
 
             # Get the first sheet
             sheet = wb.sheet_by_index(0)
 
             # Loop over worksheet
-            #[u'Report Date: ', '', 40863.0, '', '', '']
-            #['', '', '', '', '', '']
-            #[u'Item Code', u'Item Status', u'Description', u'Size', u'Bottles per Case', u'Bottle Price']
-            #[u'0102B', u'@', u'GLENFIDDICH SNOW PHOENIX', u'750 ML', 6.0, 92.950000000000003]
             for rownum in range(sheet.nrows):
-                print sheet.row_values(rownum)
+                values = sheet.row_values(rownum)
+                if len(values) > 0:
+                    try:
+                        # TODO: Updating products
+                        Product.from_row(values)
+                    except IndexError:
+                        pass
         except IOError as (errno, strerror):
             raise CommandError("%s" % strerror)
         except IndexError:
|
fc036a2cc7bd3200d98ed833343e116f4ce32bf1
|
kitchen/text/exceptions.py
|
kitchen/text/exceptions.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Red Hat, Inc
#
# kitchen is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# kitchen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
# Authors:
# Toshio Kuratomi <toshio@fedoraproject.org>
#
from kitchen import exceptions
class XmlEncodeError(exceptions.KitchenException):
'''Exception thrown by error conditions when encoding an xml string.
'''
pass
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Red Hat, Inc
#
# kitchen is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# kitchen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
# Authors:
# Toshio Kuratomi <toshio@fedoraproject.org>
#
from kitchen import exceptions
class XmlEncodeError(exceptions.KitchenException):
'''Exception thrown by error conditions when encoding an xml string.
'''
pass
class ControlCharError(exceptions.KitchenException):
'''Exception thrown when an ascii control character is encountered.
'''
pass
|
Add ControlCharError for process_control_chars function
|
Add ControlCharError for process_control_chars function
|
Python
|
lgpl-2.1
|
fedora-infra/kitchen,fedora-infra/kitchen
|
---
+++
@@ -24,3 +24,8 @@
'''Exception thrown by error conditions when encoding an xml string.
'''
pass
+
+class ControlCharError(exceptions.KitchenException):
+ '''Exception thrown when an ascii control character is encountered.
+ '''
+ pass
|
e0c19574995224fe56fad411ce6f0796b71f8af5
|
l10n_br_zip/__openerp__.py
|
l10n_br_zip/__openerp__.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '8.0.1.0.1',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': ['test/zip_demo.yml'],
'category': 'Localization',
'installable': False,
}
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '9.0.1.0.0',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': [
'test/zip_demo.yml'
],
'category': 'Localization',
'installable': True,
}
|
Change the version of the module.
|
[MIG] Change the version of the module.
|
Python
|
agpl-3.0
|
akretion/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil
|
---
+++
@@ -6,7 +6,7 @@
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
- 'version': '8.0.1.0.1',
+ 'version': '9.0.1.0.0',
'depends': [
'l10n_br_base',
],
@@ -18,7 +18,9 @@
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
- 'test': ['test/zip_demo.yml'],
+ 'test': [
+ 'test/zip_demo.yml'
+ ],
'category': 'Localization',
- 'installable': False,
+ 'installable': True,
}
|
52b0833fb597f7a6e9de4d5da768bb2bc8f5f012
|
dope/__init__.py
|
dope/__init__.py
|
#!/usr/bin/env python
# coding=utf8
from flask import Flask
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
import model
import defaults
from views.frontend import frontend, oid
def create_app(config_filename):
app = Flask(__name__)
app.config.from_object(defaults)
app.config.from_pyfile(config_filename)
# init db connection
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], encoding = 'utf8', echo = app.config['SQLALCHEMY_ECHO'])
app.session = scoped_session(sessionmaker(bind = engine))
@app.after_request
def shutdown_session(response):
app.session.remove()
return response
app.storage = model.FileStorage(app.config['FILE_STORAGE'])
app.oid = oid
app.oid.init_app(app)
# load modules
app.register_module(frontend)
return app
|
#!/usr/bin/env python
# coding=utf8
from flask import Flask
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
import model
import defaults
from views.frontend import frontend, oid
def create_app(config_filename):
app = Flask(__name__)
app.config.from_object(defaults)
app.config.from_pyfile(config_filename)
# init db connection
app.engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], encoding = 'utf8', echo = app.config['SQLALCHEMY_ECHO'])
app.session = scoped_session(sessionmaker(bind = app.engine))
@app.after_request
def shutdown_session(response):
app.session.remove()
return response
app.storage = model.FileStorage(app.config['FILE_STORAGE'])
app.oid = oid
app.oid.init_app(app)
# load modules
app.register_module(frontend)
return app
|
Make engine available in app.
|
Make engine available in app.
|
Python
|
mit
|
mbr/dope,mbr/dope
|
---
+++
@@ -15,8 +15,8 @@
app.config.from_pyfile(config_filename)
# init db connection
- engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], encoding = 'utf8', echo = app.config['SQLALCHEMY_ECHO'])
- app.session = scoped_session(sessionmaker(bind = engine))
+ app.engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'], encoding = 'utf8', echo = app.config['SQLALCHEMY_ECHO'])
+ app.session = scoped_session(sessionmaker(bind = app.engine))
@app.after_request
def shutdown_session(response):
|
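Binding the engine to the app object (rather than keeping it in a local) means anything that holds the app can reuse the same connection, for example to create tables in tests. A minimal sketch under that assumption, with a plain object standing in for flask.Flask:

from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker

class FakeApp(object):
    """Stand-in for flask.Flask, just to show the attribute wiring."""

def make_app(uri):
    app = FakeApp()
    app.engine = create_engine(uri)
    app.session = scoped_session(sessionmaker(bind=app.engine))
    return app

app = make_app('sqlite://')          # in-memory database
MetaData().create_all(app.engine)    # a test can now reach the engine
app.session.remove()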
a06607c9fa5a248000edeba6a392a3ecdd531507
|
src/tempel/models.py
|
src/tempel/models.py
|
from django.db import models
from django.conf import settings
from tempel import utils
class Entry(models.Model):
content = models.TextField()
language = models.CharField(max_length=20,
choices=utils.get_languages())
created = models.DateTimeField(auto_now=True, auto_now_add=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ['-created']
verbose_name_plural = "entries"
|
from django.db import models
from django.conf import settings
from tempel import utils
class Entry(models.Model):
content = models.TextField()
language = models.CharField(max_length=20,
choices=utils.get_languages())
created = models.DateTimeField(auto_now=True, auto_now_add=True)
active = models.BooleanField(default=True)
class Meta:
ordering = ['-created']
verbose_name_plural = "entries"
def get_language(self):
return utils.get_language(self.language)
def get_mimetype(self):
return utils.get_mimetype(self.language)
def get_filename(self):
return '%s.%s' % (self.id, self.get_extension())
def get_extension(self):
return utils.get_extension(self.language)
|
Add functions to EntryModel to get language, mimetype, filename, and extension
|
Add functions to EntryModel to get language, mimetype, filename, and extension
|
Python
|
agpl-3.0
|
fajran/tempel
|
---
+++
@@ -13,5 +13,17 @@
class Meta:
ordering = ['-created']
verbose_name_plural = "entries"
-
+ def get_language(self):
+ return utils.get_language(self.language)
+
+ def get_mimetype(self):
+ return utils.get_mimetype(self.language)
+
+ def get_filename(self):
+ return '%s.%s' % (self.id, self.get_extension())
+
+ def get_extension(self):
+ return utils.get_extension(self.language)
+
+
|
3e0b91b310afb64589e934a18fd75e767b75e43f
|
project/settings_prod.py
|
project/settings_prod.py
|
from project.settings_common import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# CACHE
from memcacheify import memcacheify
CACHES = memcacheify()
MIDDLEWARE_CLASSES += (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static/')
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
#STATIC_URL = 'https://s3.amazonaws.com/lobbyingph/'
STATIC_URL = 'http://commondatastorage.googleapis.com/lobbyingph/'
AWS_ACCESS_KEY_ID = os.environ['AWS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET']
AWS_STORAGE_BUCKET_NAME = 'lobbyingph'
import dj_database_url
DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}
|
from project.settings_common import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# CACHE
from memcacheify import memcacheify
CACHES = memcacheify()
MIDDLEWARE_CLASSES += (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static/')
# Google cloud for static files
STATIC_URL = 'http://commondatastorage.googleapis.com/lobbyingph/'
# AWS s3 for static files
#STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
#STATIC_URL = 'https://s3.amazonaws.com/lobbyingph/''
#AWS_ACCESS_KEY_ID = os.environ['AWS_KEY_ID']
#AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET']
#AWS_STORAGE_BUCKET_NAME = 'lobbyingph'
import dj_database_url
DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}
|
Comment out s3 settings. It was breaking admin static file serve
|
Comment out s3 settings. It was breaking admin static file serve
|
Python
|
mit
|
AxisPhilly/lobbying.ph-django,AxisPhilly/lobbying.ph-django,AxisPhilly/lobbying.ph-django
|
---
+++
@@ -15,13 +15,15 @@
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static/')
-STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
-#STATIC_URL = 'https://s3.amazonaws.com/lobbyingph/'
+# Google cloud for static files
STATIC_URL = 'http://commondatastorage.googleapis.com/lobbyingph/'
-AWS_ACCESS_KEY_ID = os.environ['AWS_KEY_ID']
-AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET']
-AWS_STORAGE_BUCKET_NAME = 'lobbyingph'
+# AWS s3 for static files
+#STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
+#STATIC_URL = 'https://s3.amazonaws.com/lobbyingph/''
+#AWS_ACCESS_KEY_ID = os.environ['AWS_KEY_ID']
+#AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET']
+#AWS_STORAGE_BUCKET_NAME = 'lobbyingph'
import dj_database_url
DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}
|
ee32b2e48acd47f1f1ff96482abf20f3d1818fc4
|
tests/__init__.py
|
tests/__init__.py
|
# -*- coding: utf-8 -*-
"""
Unit test.
Each file in tests/ is for each main package.
"""
import sys
import unittest
sys.path.append("../pythainlp")
loader = unittest.TestLoader()
testSuite = loader.discover("tests")
testRunner = unittest.TextTestRunner(verbosity=1)
testRunner.run(testSuite)
|
# -*- coding: utf-8 -*-
"""
Unit test.
Each file in tests/ is for each main package.
"""
import sys
import unittest
import nltk
sys.path.append("../pythainlp")
nltk.download('omw-1.4') # load wordnet
loader = unittest.TestLoader()
testSuite = loader.discover("tests")
testRunner = unittest.TextTestRunner(verbosity=1)
testRunner.run(testSuite)
|
Add load wordnet to tests
|
Add load wordnet to tests
|
Python
|
apache-2.0
|
PyThaiNLP/pythainlp
|
---
+++
@@ -6,8 +6,11 @@
"""
import sys
import unittest
+import nltk
sys.path.append("../pythainlp")
+
+nltk.download('omw-1.4') # load wordnet
loader = unittest.TestLoader()
testSuite = loader.discover("tests")
|
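A guarded variant skips the download when the corpus is already installed, avoiding repeated fetch attempts on every test run; the 'corpora/omw-1.4' lookup path is an assumption about where NLTK unpacks this corpus:

import nltk

try:
    nltk.data.find('corpora/omw-1.4')   # assumed resource path
except LookupError:
    nltk.download('omw-1.4')            # load wordnet data on first run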
dd248a14a40dea03458985640571bccf9b38b030
|
conftest.py
|
conftest.py
|
import pytest
import compas
import math
import numpy
def pytest_ignore_collect(path):
if "rhino" in str(path):
return True
if "blender" in str(path):
return True
if "ghpython" in str(path):
return True
if "matlab" in str(path):
return True
if "robots" in str(path):
return True
if str(path).endswith('_cli.py'):
return True
@pytest.fixture(autouse=True)
def add_compas(doctest_namespace):
doctest_namespace["compas"] = compas
@pytest.fixture(autouse=True)
def add_math(doctest_namespace):
doctest_namespace["math"] = math
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["np"] = numpy
|
import pytest
import compas
import math
import numpy
def pytest_ignore_collect(path):
if "rhino" in str(path):
return True
if "blender" in str(path):
return True
if "ghpython" in str(path):
return True
if "matlab" in str(path):
return True
if str(path).endswith('_cli.py'):
return True
@pytest.fixture(autouse=True)
def add_compas(doctest_namespace):
doctest_namespace["compas"] = compas
@pytest.fixture(autouse=True)
def add_math(doctest_namespace):
doctest_namespace["math"] = math
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["np"] = numpy
|
Remove robots from pytest path ignore, as requested by @gonzalocasas.
|
Remove robots from pytest path ignore, as requested by @gonzalocasas.
|
Python
|
mit
|
compas-dev/compas
|
---
+++
@@ -17,9 +17,6 @@
if "matlab" in str(path):
return True
- if "robots" in str(path):
- return True
-
if str(path).endswith('_cli.py'):
return True
|
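The three autouse fixtures above inject compas, math and numpy into the namespace of every collected doctest, so doctests in the library can use those names without importing them (when pytest runs with --doctest-modules). Illustrative function, not from compas:

def circle_area(radius):
    """Compute the area of a circle.

    >>> abs(circle_area(1.0) - math.pi) < 1e-9
    True
    """
    return 3.141592653589793 * radius ** 2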
27ab3ad3d1ce869baec85264b840da49ff43f82f
|
scripts/sync_exceeded_traffic_limits.py
|
scripts/sync_exceeded_traffic_limits.py
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import os
from flask import _request_ctx_stack, g, request
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from pycroft.model import session
from pycroft.model.session import set_scoped_session
from scripts.schema import AlembicHelper, SchemaStrategist
from pycroft.lib import traffic
def main():
try:
connection_string = os.environ['PYCROFT_DB_URI']
except KeyError:
raise RuntimeError("Environment variable PYCROFT_DB_URI must be "
"set to an SQLAlchemy connection string.")
engine = create_engine(connection_string)
connection = engine.connect()
state = AlembicHelper(connection)
strategy = SchemaStrategist(state).determine_schema_strategy()
strategy()
engine = create_engine(connection_string)
set_scoped_session(scoped_session(sessionmaker(bind=engine),
scopefunc=lambda: _request_ctx_stack.top))
print("Starting synchronization of exceeded traffic limits.")
traffic.sync_exceeded_traffic_limits()
session.session.commit()
print("Finished synchronization.")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import os
from flask import _request_ctx_stack, g, request
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from pycroft.model import session
from pycroft.model.session import set_scoped_session
from scripts.schema import AlembicHelper, SchemaStrategist
from pycroft.lib import traffic
def main():
try:
connection_string = os.environ['PYCROFT_DB_URI']
except KeyError:
raise RuntimeError("Environment variable PYCROFT_DB_URI must be "
"set to an SQLAlchemy connection string.")
engine = create_engine(connection_string)
connection = engine.connect()
state = AlembicHelper(connection)
strategist = SchemaStrategist(state)
is_up_to_date = strategist.helper.running_version == strategist.helper.desired_version
if not is_up_to_date:
print("Schema is not up to date!")
return
set_scoped_session(scoped_session(sessionmaker(bind=engine),
scopefunc=lambda: _request_ctx_stack.top))
print("Starting synchronization of exceeded traffic limits.")
traffic.sync_exceeded_traffic_limits()
session.session.commit()
print("Finished synchronization.")
if __name__ == "__main__":
main()
|
Add schema version check to sync script
|
Add schema version check to sync script
|
Python
|
apache-2.0
|
agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,agdsn/pycroft
|
---
+++
@@ -25,10 +25,12 @@
engine = create_engine(connection_string)
connection = engine.connect()
state = AlembicHelper(connection)
- strategy = SchemaStrategist(state).determine_schema_strategy()
- strategy()
+ strategist = SchemaStrategist(state)
+ is_up_to_date = strategist.helper.running_version == strategist.helper.desired_version
+ if not is_up_to_date:
+ print("Schema is not up to date!")
+ return
- engine = create_engine(connection_string)
set_scoped_session(scoped_session(sessionmaker(bind=engine),
scopefunc=lambda: _request_ctx_stack.top))
|
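AlembicHelper's running_version/desired_version attributes are project internals, but the comparison presumably boils down to the usual Alembic check: the revision stamped in the database versus the head of the migration scripts. A sketch with plain Alembic, under that assumption:

from alembic.config import Config
from alembic.runtime.migration import MigrationContext
from alembic.script import ScriptDirectory
from sqlalchemy import create_engine

def schema_is_up_to_date(connection_string, ini_path='alembic.ini'):
    engine = create_engine(connection_string)
    with engine.connect() as connection:
        running = MigrationContext.configure(connection).get_current_revision()
    desired = ScriptDirectory.from_config(Config(ini_path)).get_current_head()
    return running == desired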
a82067a5484133233ccf7037e5c277eaaa5318fa
|
aioes/__init__.py
|
aioes/__init__.py
|
import re
import sys
from collections import namedtuple
from .client import Elasticsearch
__all__ = ('Elasticsearch',)
__version__ = '0.1.0a'
version = __version__ + ' , Python ' + sys.version
VersionInfo = namedtuple('VersionInfo',
'major minor micro releaselevel serial')
def _parse_version(ver):
RE = (r'^(?P<major>\d+)\.(?P<minor>\d+)\.'
'(?P<micro>\d+)((?P<releaselevel>[a-z]+)(?P<serial>\d+)?)?$')
match = re.match(RE, ver)
try:
major = int(match.group('major'))
minor = int(match.group('minor'))
micro = int(match.group('micro'))
levels = {'c': 'candidate',
'a': 'alpha',
'b': 'beta',
None: 'final'}
releaselevel = levels[match.group('releaselevel')]
serial = int(match.group('serial')) if match.group('serial') else 0
return VersionInfo(major, minor, micro, releaselevel, serial)
except Exception:
raise ImportError("Invalid package version {}".format(ver))
version_info = _parse_version(__version__)
(Elasticsearch,)
|
import re
import sys
from collections import namedtuple
from .client import Elasticsearch
from .exception import (ConnectionError, NotFountError, ConflictError,
RequestError, TransportError)
__all__ = ('Elasticsearch', 'ConnectionError', 'NotFountError',
'ConflictError', 'RequestError', 'TransportError')
__version__ = '0.1.0a'
version = __version__ + ' , Python ' + sys.version
VersionInfo = namedtuple('VersionInfo',
'major minor micro releaselevel serial')
def _parse_version(ver):
RE = (r'^(?P<major>\d+)\.(?P<minor>\d+)\.'
'(?P<micro>\d+)((?P<releaselevel>[a-z]+)(?P<serial>\d+)?)?$')
match = re.match(RE, ver)
try:
major = int(match.group('major'))
minor = int(match.group('minor'))
micro = int(match.group('micro'))
levels = {'c': 'candidate',
'a': 'alpha',
'b': 'beta',
None: 'final'}
releaselevel = levels[match.group('releaselevel')]
serial = int(match.group('serial')) if match.group('serial') else 0
return VersionInfo(major, minor, micro, releaselevel, serial)
except Exception:
raise ImportError("Invalid package version {}".format(ver))
version_info = _parse_version(__version__)
(Elasticsearch, ConnectionError, NotFountError, ConflictError,
RequestError, TransportError)
|
Add aioes exceptions to top-level imports
|
Add aioes exceptions to top-level imports
|
Python
|
apache-2.0
|
aio-libs/aioes
|
---
+++
@@ -3,8 +3,11 @@
from collections import namedtuple
from .client import Elasticsearch
+from .exception import (ConnectionError, NotFountError, ConflictError,
+ RequestError, TransportError)
-__all__ = ('Elasticsearch',)
+__all__ = ('Elasticsearch', 'ConnectionError', 'NotFountError',
+ 'ConflictError', 'RequestError', 'TransportError')
__version__ = '0.1.0a'
@@ -38,4 +41,5 @@
version_info = _parse_version(__version__)
-(Elasticsearch,)
+(Elasticsearch, ConnectionError, NotFountError, ConflictError,
+ RequestError, TransportError)
|
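A quick sanity check of the _parse_version helper above (assuming the package is importable as aioes): the trailing letter maps through the levels dict, and a missing serial or releaselevel falls back to 0 and 'final' respectively:

from aioes import VersionInfo, _parse_version

assert _parse_version('0.1.0a') == VersionInfo(0, 1, 0, 'alpha', 0)
assert _parse_version('1.2.3b4') == VersionInfo(1, 2, 3, 'beta', 4)
assert _parse_version('2.0.0') == VersionInfo(2, 0, 0, 'final', 0)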
7669a43b1dcf097434942bea64e05a29c32f9717
|
django_dowser/urls.py
|
django_dowser/urls.py
|
from django.conf.urls.defaults import *
urlpatterns = patterns('django_dowser.views',
url(r'^trace/(?P<typename>[\.\-\w]+)(/(?P<objid>\d+))?$', 'trace'),
url(r'^tree/(?P<typename>[\.\-\w]+)/(?P<objid>\d+)$', 'tree'),
url(r'^$', 'index'),
)
|
try:
from django.conf.urls import *
except ImportError:
from django.conf.urls.defaults import *
urlpatterns = patterns('django_dowser.views',
url(r'^trace/(?P<typename>[\.\-\w]+)(/(?P<objid>\d+))?$', 'trace'),
url(r'^tree/(?P<typename>[\.\-\w]+)/(?P<objid>\d+)$', 'tree'),
url(r'^$', 'index'),
)
|
Fix compatibility with Django 1.6
|
Fix compatibility with Django 1.6
|
Python
|
mit
|
munhitsu/django-dowser,munhitsu/django-dowser
|
---
+++
@@ -1,4 +1,8 @@
-from django.conf.urls.defaults import *
+try:
+ from django.conf.urls import *
+except ImportError:
+ from django.conf.urls.defaults import *
+
urlpatterns = patterns('django_dowser.views',
url(r'^trace/(?P<typename>[\.\-\w]+)(/(?P<objid>\d+))?$', 'trace'),
|
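The try/except import is the standard way to span Django releases that moved a module; the same shape works for other relocations, for example (hypothetical pairing, shown only to illustrate the pattern):

try:
    from django.urls import reverse               # newer Django
except ImportError:
    from django.core.urlresolvers import reverse  # older Django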
4a4dbfd142e2f8fca3e82d7790ace4ed88bb0b3f
|
djangocms_spa/urls.py
|
djangocms_spa/urls.py
|
from django.conf.urls import url
from .views import SpaCmsPageDetailApiView
urlpatterns = [
url(r'^(?P<language_code>[\w-]+)/pages/$', SpaCmsPageDetailApiView.as_view(), name='cms_page_detail_home'),
url(r'^(?P<language_code>[\w-]+)/pages/(?P<path>.*)/$', SpaCmsPageDetailApiView.as_view(), name='cms_page_detail'),
]
|
from django.conf.urls import url
from .views import SpaCmsPageDetailApiView
urlpatterns = [
url(r'^pages/$', SpaCmsPageDetailApiView.as_view(), name='cms_page_detail_home'),
url(r'^pages/(?P<path>.*)/$', SpaCmsPageDetailApiView.as_view(), name='cms_page_detail'),
]
|
Remove language code from path
|
Remove language code from path
We no longer need the language detection in the URL. The locale
middleware already handles the language properly and we can consume
it from the request.
|
Python
|
mit
|
dreipol/djangocms-spa,dreipol/djangocms-spa
|
---
+++
@@ -3,6 +3,6 @@
from .views import SpaCmsPageDetailApiView
urlpatterns = [
- url(r'^(?P<language_code>[\w-]+)/pages/$', SpaCmsPageDetailApiView.as_view(), name='cms_page_detail_home'),
- url(r'^(?P<language_code>[\w-]+)/pages/(?P<path>.*)/$', SpaCmsPageDetailApiView.as_view(), name='cms_page_detail'),
+ url(r'^pages/$', SpaCmsPageDetailApiView.as_view(), name='cms_page_detail_home'),
+ url(r'^pages/(?P<path>.*)/$', SpaCmsPageDetailApiView.as_view(), name='cms_page_detail'),
]
|
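With the language prefix gone from the URL, a view reads the active language from the request instead, as activated by Django's locale middleware. A minimal hypothetical view showing where the value now comes from:

from django.http import JsonResponse
from django.utils.translation import get_language

def current_language(request):
    # LocaleMiddleware has already activated the request's language.
    return JsonResponse({'language': get_language()})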
cff0f979abc4bf9bfb24b9cd70c447a2bc838501
|
syncplay/__init__.py
|
syncplay/__init__.py
|
version = '1.7.0'
revision = ' development'
milestone = 'Yoitsu'
release_number = '101'
projectURL = 'https://syncplay.pl/'
|
version = '1.7.0'
revision = ' beta 1'
milestone = 'Yoitsu'
release_number = '102'
projectURL = 'https://syncplay.pl/'
|
Mark as 1.7.0 beta 1
|
Mark as 1.7.0 beta 1
|
Python
|
apache-2.0
|
Syncplay/syncplay,Syncplay/syncplay
|
---
+++
@@ -1,5 +1,5 @@
version = '1.7.0'
-revision = ' development'
+revision = ' beta 1'
milestone = 'Yoitsu'
-release_number = '101'
+release_number = '102'
projectURL = 'https://syncplay.pl/'
|
85e7433948785b233876bb0f85795adf49636712
|
ca/views.py
|
ca/views.py
|
from flask import Flask, request, render_template, flash, url_for, abort
from itsdangerous import URLSafeSerializer
from ca import app, db
from ca.forms import RequestForm
from ca.models import Request
s = URLSafeSerializer(app.config['SECRET_KEY'])
@app.route('/', methods=['GET', 'POST'])
def index():
form = RequestForm(request.form)
if request.method == 'POST' and form.validate():
req = Request(form.id.data, form.email.data)
db.session.add(req)
db.session.commit()
token = s.dumps(req.id, salt='freifunk-ca-service')
confirm_url = url_for('get_certificate',
token=token,
_external=True)
return render_template('thanks.html')
return render_template('index.html', form=form)
@app.route('/certificates/<token>', methods=['GET'])
def get_certificate(token):
try:
cert_id = s.loads(token, salt='freifunk-ca-service')
except:
abort(404)
ca_req = Request.query.get_or_404(cert_id)
print(ca_req)
return "return key + cert here + {}".format(ca_req)
|
from flask import Flask, request, render_template, flash, url_for, abort
from itsdangerous import URLSafeSerializer
from ca import app, db
from ca.forms import RequestForm
from ca.models import Request
s = URLSafeSerializer(app.config['SECRET_KEY'])
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/', methods=['POST'])
def post_request():
form = RequestForm(request.form)
if form.validate():
req = Request(form.id.data, form.email.data)
db.session.add(req)
db.session.commit()
token = s.dumps(req.id, salt='freifunk-ca-service')
confirm_url = url_for('get_certificate',
token=token,
_external=True)
return render_template('thanks.html')
else:
return render_template('index.html', form=form)
@app.route('/certificates/<token>', methods=['GET'])
def get_certificate(token):
try:
cert_id = s.loads(token, salt='freifunk-ca-service')
except:
abort(404)
ca_req = Request.query.get_or_404(cert_id)
print(ca_req)
return "return key + cert here + {}".format(ca_req)
|
Split index route into get and post
|
Split index route into get and post
|
Python
|
mit
|
freifunk-berlin/ca.berlin.freifunk.net,freifunk-berlin/ca.berlin.freifunk.net,freifunk-berlin/ca.berlin.freifunk.net
|
---
+++
@@ -7,10 +7,14 @@
s = URLSafeSerializer(app.config['SECRET_KEY'])
-@app.route('/', methods=['GET', 'POST'])
+@app.route('/', methods=['GET'])
def index():
+ return render_template('index.html')
+
+@app.route('/', methods=['POST'])
+def post_request():
form = RequestForm(request.form)
- if request.method == 'POST' and form.validate():
+ if form.validate():
req = Request(form.id.data, form.email.data)
db.session.add(req)
db.session.commit()
@@ -20,8 +24,8 @@
_external=True)
return render_template('thanks.html')
- return render_template('index.html', form=form)
-
+ else:
+ return render_template('index.html', form=form)
@app.route('/certificates/<token>', methods=['GET'])
def get_certificate(token):
|
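Round trip of the token scheme above: dumps signs (not encrypts) the id, the salt namespaces the signature, and loads raises itsdangerous.BadSignature on tampering, which is what the bare except in get_certificate turns into a 404. Standalone sketch with a dummy key:

from itsdangerous import BadSignature, URLSafeSerializer

s = URLSafeSerializer('dummy-secret-key')
token = s.dumps(42, salt='freifunk-ca-service')

assert s.loads(token, salt='freifunk-ca-service') == 42

try:
    s.loads(token, salt='some-other-salt')   # wrong salt
except BadSignature:
    pass                                     # -> abort(404) in the view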
fe1d8b2172aecf4f2f7cebe3c61eeb778f3db23a
|
src/cms/apps/historylinks/middleware.py
|
src/cms/apps/historylinks/middleware.py
|
"""Middleware used by the history links service."""
from django.shortcuts import redirect
from cms.apps.historylinks.models import HistoryLink
class HistoryLinkFallbackMiddleware(object):
"""Middleware that attempts to rescue 404 responses with a redirect to it's new location."""
def process_response(self, request, response):
"""Attempts to rescue 404 responses."""
if response.status_code == 404:
# Try to rescue the response.
try:
link = HistoryLink.objects.get(path=request.path)
path = link.object.get_absolute_url()
if path != request.path:
return redirect(link.object, permanent=True)
return response
except HistoryLink.DoesNotExist:
pass
return response
|
"""Middleware used by the history links service."""
from django.shortcuts import redirect
from cms.apps.historylinks.models import HistoryLink
class HistoryLinkFallbackMiddleware(object):
"""Middleware that attempts to rescue 404 responses with a redirect to it's new location."""
def process_response(self, request, response):
"""Attempts to rescue 404 responses."""
if response.status_code == 404:
# Try to rescue the response.
try:
link = HistoryLink.objects.get(path=request.path)
obj = link.object
if obj:
path = obj.get_absolute_url()
if path != request.path:
return redirect(link.object, permanent=True)
return response
except HistoryLink.DoesNotExist:
pass
return response
|
Fix for historylinks connecting to missing objects
|
Fix for historylinks connecting to missing objects
|
Python
|
bsd-3-clause
|
etianen/cms,etianen/cms,danielsamuels/cms,danielsamuels/cms,danielsamuels/cms,dan-gamble/cms,etianen/cms,lewiscollard/cms,lewiscollard/cms,jamesfoley/cms,dan-gamble/cms,jamesfoley/cms,jamesfoley/cms,dan-gamble/cms,lewiscollard/cms,jamesfoley/cms
|
---
+++
@@ -14,9 +14,11 @@
# Try to rescue the response.
try:
link = HistoryLink.objects.get(path=request.path)
- path = link.object.get_absolute_url()
- if path != request.path:
- return redirect(link.object, permanent=True)
+ obj = link.object
+ if obj:
+ path = obj.get_absolute_url()
+ if path != request.path:
+ return redirect(link.object, permanent=True)
return response
except HistoryLink.DoesNotExist:
pass
|
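The fix is the usual guard for lazily resolved relations: link.object comes back as None once the target row has been deleted, and only a live object can be asked for its URL. The core of the new branch, as a standalone function with hypothetical names:

def rescue_path(link, request_path):
    obj = link.object
    if obj is None:
        return None                    # target gone; keep the 404
    path = obj.get_absolute_url()
    return path if path != request_path else None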
3b1c42b5001bf70fff47a53a1cf003538b619c53
|
auth_mac/models.py
|
auth_mac/models.py
|
from django.db import models
# Create your models here.
|
from django.db import models
from django.contrib.auth.models import User
class Credentials(models.Model):
"Keeps track of issued MAC credentials"
user = models.ForeignKey(User)
expiry = models.DateTimeField("Expires On")
identifier = models.CharField("MAC Key Identifier", max_length=16, null=True, blank=True)
key = models.CharField("MAC Key", max_length=16, null=True, blank=True)
class Nonce(models.Model):
"""Keeps track of any NONCE combinations that we have used"""
nonce = models.CharField("NONCE", max_length=16, null=True, blank=True)
timestamp = models.DateTimeField("Timestamp", auto_now_add=True)
credentials = models.ForeignKey(Credentials)
|
Add a basic model for the identifications
|
Add a basic model for the identifications
|
Python
|
mit
|
ndevenish/auth_mac
|
---
+++
@@ -1,3 +1,15 @@
from django.db import models
+from django.contrib.auth.models import User
-# Create your models here.
+class Credentials(models.Model):
+ "Keeps track of issued MAC credentials"
+ user = models.ForeignKey(User)
+ expiry = models.DateTimeField("Expires On")
+ identifier = models.CharField("MAC Key Identifier", max_length=16, null=True, blank=True)
+ key = models.CharField("MAC Key", max_length=16, null=True, blank=True)
+
+class Nonce(models.Model):
+ """Keeps track of any NONCE combinations that we have used"""
+ nonce = models.CharField("NONCE", max_length=16, null=True, blank=True)
+ timestamp = models.DateTimeField("Timestamp", auto_now_add=True)
+ credentials = models.ForeignKey(Credentials)
|
83c7fb070d0d79036ce697835e69c5e0aa2e14b7
|
app/core/info.py
|
app/core/info.py
|
import os
import pathlib
# RELEASE-UPDATE
APP_DIR = pathlib.Path(os.path.realpath(__file__)).parent.parent
ROOT_DIR = APP_DIR.parent
DEFAULT_DB_PATH = '/instance/storage/storage.db'
PROJECT_NAME = 'Zordon'
PROJECT_VERSION = '4.0.0'
PROJECT_FULL_NAME = '{} v{}'.format(PROJECT_NAME, PROJECT_VERSION)
|
import os
import pathlib
# RELEASE-UPDATE
APP_DIR = pathlib.Path(os.path.realpath(__file__)).parent.parent
ROOT_DIR = APP_DIR.parent
DEFAULT_DB_PATH = '/instance/storage'
PROJECT_NAME = 'Zordon'
PROJECT_VERSION = '4.0.0'
PROJECT_FULL_NAME = '{} v{}'.format(PROJECT_NAME, PROJECT_VERSION)
|
Fix storage path for Docker mode
|
Fix storage path for Docker mode
|
Python
|
mit
|
KrusnikViers/Zordon,KrusnikViers/Zordon
|
---
+++
@@ -5,7 +5,7 @@
# RELEASE-UPDATE
APP_DIR = pathlib.Path(os.path.realpath(__file__)).parent.parent
ROOT_DIR = APP_DIR.parent
-DEFAULT_DB_PATH = '/instance/storage/storage.db'
+DEFAULT_DB_PATH = '/instance/storage'
PROJECT_NAME = 'Zordon'
PROJECT_VERSION = '4.0.0'
PROJECT_FULL_NAME = '{} v{}'.format(PROJECT_NAME, PROJECT_VERSION)
|
87f4bb8cdcb607cb4f15ecbda9a3cb50a3fd5319
|
src/webargs/__init__.py
|
src/webargs/__init__.py
|
# -*- coding: utf-8 -*-
from distutils.version import LooseVersion
from marshmallow.utils import missing
# Make marshmallow's validation functions importable from webargs
from marshmallow import validate
from webargs.core import ValidationError
from webargs.dict2schema import dict2schema
from webargs import fields
__version__ = "5.5.0"
__version_info__ = tuple(LooseVersion(__version__).version)
__author__ = "Steven Loria"
__license__ = "MIT"
__all__ = ("dict2schema", "ValidationError", "fields", "missing", "validate")
|
# -*- coding: utf-8 -*-
from distutils.version import LooseVersion
from marshmallow.utils import missing
# Make marshmallow's validation functions importable from webargs
from marshmallow import validate
from webargs.core import ValidationError
from webargs.dict2schema import dict2schema
from webargs import fields
__version__ = "5.5.0"
__version_info__ = tuple(LooseVersion(__version__).version)
__all__ = ("dict2schema", "ValidationError", "fields", "missing", "validate")
|
Remove unnecessary __author__ and __license__
|
Remove unnecessary __author__ and __license__
|
Python
|
mit
|
sloria/webargs
|
---
+++
@@ -11,8 +11,4 @@
__version__ = "5.5.0"
__version_info__ = tuple(LooseVersion(__version__).version)
-__author__ = "Steven Loria"
-__license__ = "MIT"
-
-
__all__ = ("dict2schema", "ValidationError", "fields", "missing", "validate")
|
24fd469296951fd8445e18d482a97ad5bb9108e7
|
storm/tests/conftest.py
|
storm/tests/conftest.py
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
import pytest
from .common import INSTANCE, HOST
from datadog_checks.dev import docker_run, get_here, run_command
from datadog_checks.dev.conditions import CheckCommandOutput
@pytest.fixture(scope='session')
def dd_environment():
compose_file = os.path.join(get_here(), 'compose', 'docker-compose.yaml')
# Build the topology jar to use in the environment
with docker_run(compose_file, build=True, service_name='topology-maker', sleep=15):
run_command(
['docker', 'cp', 'topology-build:/topology.jar', os.path.join(get_here(), 'compose')]
)
nimbus_condition = CheckCommandOutput(['nc', '-zv', HOST, '6627'], 'succeeded')
with docker_run(compose_file, service_name='storm-nimbus', conditions=[nimbus_condition]):
with docker_run(compose_file, service_name='storm-ui',
log_patterns=[r'org.apache.storm.ui.core']):
with docker_run(
compose_file, service_name='topology',
log_patterns=['Finished submitting topology: topology']
):
yield INSTANCE
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
import socket
import pytest
from .common import INSTANCE, HOST
from datadog_checks.dev import docker_run, get_here, run_command
from datadog_checks.dev.conditions import WaitFor
def wait_for_thrift():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, 6627))
sock.close()
@pytest.fixture(scope='session')
def dd_environment():
compose_file = os.path.join(get_here(), 'compose', 'docker-compose.yaml')
# Build the topology jar to use in the environment
with docker_run(compose_file, build=True, service_name='topology-maker', sleep=15):
run_command(
['docker', 'cp', 'topology-build:/topology.jar', os.path.join(get_here(), 'compose')]
)
nimbus_condition = WaitFor(wait_for_thrift)
with docker_run(compose_file, service_name='storm-nimbus', conditions=[nimbus_condition]):
with docker_run(compose_file, service_name='storm-ui',
log_patterns=[r'org.apache.storm.ui.core']):
with docker_run(
compose_file, service_name='topology',
log_patterns=['Finished submitting topology: topology']
):
yield INSTANCE
|
Use socket instead of nc
|
Use socket instead of nc
|
Python
|
bsd-3-clause
|
DataDog/integrations-extras,DataDog/integrations-extras,DataDog/integrations-extras,DataDog/integrations-extras,DataDog/integrations-extras
|
---
+++
@@ -2,12 +2,19 @@
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
+import socket
import pytest
from .common import INSTANCE, HOST
from datadog_checks.dev import docker_run, get_here, run_command
-from datadog_checks.dev.conditions import CheckCommandOutput
+from datadog_checks.dev.conditions import WaitFor
+
+
+def wait_for_thrift():
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect((HOST, 6627))
+ sock.close()
@pytest.fixture(scope='session')
@@ -19,7 +26,7 @@
run_command(
['docker', 'cp', 'topology-build:/topology.jar', os.path.join(get_here(), 'compose')]
)
- nimbus_condition = CheckCommandOutput(['nc', '-zv', HOST, '6627'], 'succeeded')
+ nimbus_condition = WaitFor(wait_for_thrift)
with docker_run(compose_file, service_name='storm-nimbus', conditions=[nimbus_condition]):
with docker_run(compose_file, service_name='storm-ui',
log_patterns=[r'org.apache.storm.ui.core']):
|
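WaitFor retries its callable until it stops raising, so the socket probe replaces the external nc binary with a pure-Python connect check. An equivalent standalone version with an explicit retry budget (names are illustrative):

import socket
import time

def wait_for_port(host, port, attempts=30, delay=1.0):
    for _ in range(attempts):
        try:
            socket.create_connection((host, port), timeout=delay).close()
            return True
        except OSError:
            time.sleep(delay)
    return False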
33962b72cea77735732c31e6af6dac585ebe271e
|
charat2/tasks/__init__.py
|
charat2/tasks/__init__.py
|
from celery import Celery, Task
from redis import StrictRedis
from charat2.model import sm
from charat2.model.connections import redis_pool
celery = Celery("newparp", include=[
"charat2.tasks.background",
"charat2.tasks.matchmaker",
"charat2.tasks.reaper",
"charat2.tasks.roulette_matchmaker",
])
celery.config_from_object('charat2.tasks.config')
class WorkerTask(Task):
abstrct = True
_db = None
_redis = None
@property
def db(self):
if self._db is None:
self._db = sm()
return self._db
@property
def redis(self):
if self._redis is None:
self._redis = StrictRedis(connection_pool=redis_pool)
return self._redis
|
from celery import Celery, Task
from classtools import reify
from redis import StrictRedis
from charat2.model import sm
from charat2.model.connections import redis_pool
celery = Celery("newparp", include=[
"charat2.tasks.background",
"charat2.tasks.matchmaker",
"charat2.tasks.reaper",
"charat2.tasks.roulette_matchmaker",
])
celery.config_from_object('charat2.tasks.config')
class WorkerTask(Task):
abstrct = True
@reify
def db(self):
return sm()
@reify
def redis(self):
return StrictRedis(connection_pool=redis_pool)
def after_return(self, *args, **kwargs):
if hasattr(self, "db"):
self.db.close()
del self.db
if hasattr(self, "redis"):
del self.redis
|
Use reify instead of properties for the task backend connections.
|
Use reify instead of properties for the task backend connections.
|
Python
|
agpl-3.0
|
MSPARP/newparp,MSPARP/newparp,MSPARP/newparp
|
---
+++
@@ -1,4 +1,5 @@
from celery import Celery, Task
+from classtools import reify
from redis import StrictRedis
from charat2.model import sm
@@ -15,19 +16,19 @@
class WorkerTask(Task):
abstrct = True
- _db = None
- _redis = None
- @property
+ @reify
def db(self):
- if self._db is None:
- self._db = sm()
+ return sm()
- return self._db
+ @reify
+ def redis(self):
+ return StrictRedis(connection_pool=redis_pool)
- @property
- def redis(self):
- if self._redis is None:
- self._redis = StrictRedis(connection_pool=redis_pool)
+ def after_return(self, *args, **kwargs):
+ if hasattr(self, "db"):
+ self.db.close()
+ del self.db
- return self._redis
+ if hasattr(self, "redis"):
+ del self.redis
|
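reify is a non-data descriptor: the first access computes the value and stores it in the instance __dict__, which then shadows the descriptor, so later reads are plain attribute lookups and del self.db forces a recompute on the next task. (One subtlety: hasattr() on such an attribute triggers the computation itself.) A minimal stand-in for the classtools version:

class reify(object):
    """Cache the wrapped method's result on the instance (sketch)."""

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.func.__name__] = value   # shadows the descriptor
        return value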
b353441e33e8f272177b16505f12358f8a30fe6a
|
crowd_anki/main.py
|
crowd_anki/main.py
|
import os
import sys
from aqt import mw, QAction, QFileDialog
sys.path.append(os.path.join(os.path.dirname(__file__), "dist"))
from .anki.hook_vendor import HookVendor
from .anki.ui.action_vendor import ActionVendor
from .utils.log import setup_log
def anki_actions_init(window):
action_vendor = ActionVendor(window, QAction, lambda caption: QFileDialog.getExistingDirectory(caption=caption))
after_export_action_position = -2
window.form.menuCol.insertActions(window.form.menuCol.actions()[after_export_action_position],
action_vendor.actions())
def anki_init(window):
if not window:
return
HookVendor(window).setup_hooks()
anki_actions_init(window)
setup_log()
anki_init(mw)
"""
Warning:
Creation of collection has a side effect of changing current directory to ${collection_path}/collection.media
"""
|
import os
import sys
from aqt import mw, QAction, QFileDialog
sys.path.append(os.path.join(os.path.dirname(__file__), "dist"))
from .anki.hook_vendor import HookVendor
from .anki.ui.action_vendor import ActionVendor
def anki_actions_init(window):
action_vendor = ActionVendor(window, QAction, lambda caption: QFileDialog.getExistingDirectory(caption=caption))
after_export_action_position = -2
window.form.menuCol.insertActions(window.form.menuCol.actions()[after_export_action_position],
action_vendor.actions())
def anki_init(window):
if not window:
return
HookVendor(window).setup_hooks()
anki_actions_init(window)
anki_init(mw)
"""
Warning:
Creation of collection has a side effect of changing current directory to ${collection_path}/collection.media
"""
|
Remove reference to log, as it's not set up correctly yet.
|
Remove reference to log, as it's not set up correctly yet.
|
Python
|
mit
|
Stvad/CrowdAnki,Stvad/CrowdAnki,Stvad/CrowdAnki
|
---
+++
@@ -7,7 +7,6 @@
from .anki.hook_vendor import HookVendor
from .anki.ui.action_vendor import ActionVendor
-from .utils.log import setup_log
def anki_actions_init(window):
@@ -24,7 +23,6 @@
HookVendor(window).setup_hooks()
anki_actions_init(window)
- setup_log()
anki_init(mw)
|
ab59cf04530dbbcecf912b60dce181a0b24c6d29
|
download.py
|
download.py
|
#!/usr/bin/python
import sys, os
import ads
from nameparser import HumanName
reload(sys)
sys.setdefaultencoding('utf8')
names_file = open(sys.argv[1])
#Default abstract storage
abstract_directory = "abstracts"
if len(sys.argv) > 2:
abstract_directory = sys.argv[2]
if not os.path.exists(abstract_directory):
os.makedirs(abstract_directory)
number_abstracts = 4
if len(sys.argv) > 3:
number_abstracts = int(sys.argv[3])
author_num = 0
for line in names_file:
#Only names
if line[0]==',': continue
if len(line) < 4: continue
print "Author", author_num
parsed_name = HumanName(line)
papers = ads.SearchQuery(
author=parsed_name.first+", "+parsed_name.last,
sort='date',
fl=['abstract'])
abstract_file = open(abstract_directory+"/"+\
parsed_name.first+" "+parsed_name.last+".txt",'w')
j = 0
try:
for paper in papers:
abstract_file.write("Abstract "+str(j)+"\n")
try:
abstract_file.write(paper.abstract.encode('utf-8'))
except AttributeError:
pass
abstract_file.write("\n")
j += 1
if j > number_abstracts: break
except ads.base.APIResponseError:
continue
author_num+=1
|
#!/usr/bin/python
import sys, os
import ads
from nameparser import HumanName
reload(sys)
sys.setdefaultencoding('utf8')
names_file = open(sys.argv[1])
#Default abstract storage
abstract_directory = "abstracts"
if len(sys.argv) > 2:
abstract_directory = sys.argv[2]
if not os.path.exists(abstract_directory):
os.makedirs(abstract_directory)
number_abstracts = 4
if len(sys.argv) > 3:
number_abstracts = int(sys.argv[3])
author_num = 0
for line in names_file:
#Only names
if line[0]==',': continue
if len(line) < 4: continue
parsed_name = HumanName(line)
print "Author:", str(parsed_name)
papers = ads.SearchQuery(
q='author:"'+str(parsed_name)+'"',
sort='date',
fl=['abstract'])
abstract_file = open(abstract_directory+"/"+\
parsed_name.first+" "+parsed_name.last+".txt",'w')
j = 0
while True:
try:
for paper in papers:
abstract_file.write("Abstract "+str(j)+"\n")
try:
abstract_file.write(paper.abstract.encode('utf-8'))
except AttributeError:
pass
abstract_file.write("\n")
j += 1
if j > number_abstracts:
break
except ads.base.APIResponseError:
pass
else:
break
author_num+=1
|
Fix tab/space issues and use automatic name wildcard
|
Fix tab/space issues and use automatic name wildcard
- Using a regular query search allows ADS to automatically adjust author
name to hit multiple abstracts
|
Python
|
unlicense
|
MilesCranmer/research_match,MilesCranmer/research_match
|
---
+++
@@ -12,41 +12,45 @@
#Default abstract storage
abstract_directory = "abstracts"
if len(sys.argv) > 2:
- abstract_directory = sys.argv[2]
+ abstract_directory = sys.argv[2]
if not os.path.exists(abstract_directory):
- os.makedirs(abstract_directory)
+ os.makedirs(abstract_directory)
number_abstracts = 4
if len(sys.argv) > 3:
- number_abstracts = int(sys.argv[3])
+ number_abstracts = int(sys.argv[3])
author_num = 0
for line in names_file:
- #Only names
- if line[0]==',': continue
- if len(line) < 4: continue
+ #Only names
+ if line[0]==',': continue
+ if len(line) < 4: continue
- print "Author", author_num
- parsed_name = HumanName(line)
- papers = ads.SearchQuery(
- author=parsed_name.first+", "+parsed_name.last,
- sort='date',
- fl=['abstract'])
- abstract_file = open(abstract_directory+"/"+\
- parsed_name.first+" "+parsed_name.last+".txt",'w')
- j = 0
+ parsed_name = HumanName(line)
+ print "Author:", str(parsed_name)
+ papers = ads.SearchQuery(
+ q='author:"'+str(parsed_name)+'"',
+ sort='date',
+ fl=['abstract'])
+ abstract_file = open(abstract_directory+"/"+\
+ parsed_name.first+" "+parsed_name.last+".txt",'w')
+ j = 0
+ while True:
try:
for paper in papers:
- abstract_file.write("Abstract "+str(j)+"\n")
- try:
- abstract_file.write(paper.abstract.encode('utf-8'))
- except AttributeError:
- pass
- abstract_file.write("\n")
- j += 1
- if j > number_abstracts: break
+ abstract_file.write("Abstract "+str(j)+"\n")
+ try:
+ abstract_file.write(paper.abstract.encode('utf-8'))
+ except AttributeError:
+ pass
+ abstract_file.write("\n")
+ j += 1
+ if j > number_abstracts:
+ break
except ads.base.APIResponseError:
- continue
- author_num+=1
+ pass
+ else:
+ break
+ author_num+=1
|
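The new while/try/except/else loop is the idiomatic retry shape: the else clause runs only when the try block finished without the API error, and only then breaks out. Skeleton of the pattern with hypothetical names:

def consume_with_retry(make_query, handle, retryable_error):
    while True:
        try:
            for item in make_query():
                handle(item)
        except retryable_error:
            pass          # transient failure: retry the whole pass
        else:
            break         # clean pass: stop retrying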
9717271fc02b3294b08b3989913c14da68285601
|
service_control/urls.py
|
service_control/urls.py
|
"""service_control URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.authtoken import views
from rest_framework.urlpatterns import format_suffix_patterns
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-token-auth/', views.obtain_auth_token),
url(r'core/', include('core.urls')),
url(r'profiles/', include('pages.urls.profile_urls')),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
"""service_control URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework.authtoken import views
from rest_framework.urlpatterns import format_suffix_patterns
from pages.views import profile_login
urlpatterns = [
url(r'^$', profile_login, name='home_page'),
url(r'^admin/', admin.site.urls),
url(r'^api-token-auth/', views.obtain_auth_token),
url(r'core/', include('core.urls')),
url(r'profiles/', include('pages.urls.profile_urls')),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
Make the user login page the homepage
|
Make the user login page the homepage
|
Python
|
mit
|
desenho-sw-g5/service_control,desenho-sw-g5/service_control
|
---
+++
@@ -19,8 +19,11 @@
from rest_framework.authtoken import views
from rest_framework.urlpatterns import format_suffix_patterns
+from pages.views import profile_login
urlpatterns = [
+ url(r'^$', profile_login, name='home_page'),
+
url(r'^admin/', admin.site.urls),
url(r'^api-token-auth/', views.obtain_auth_token),
|
93c914a0537ee0665e5139e8a8a8bc9508a25dd7
|
test/strings/format2.py
|
test/strings/format2.py
|
"normal {{ normal }} normal {fo.__add__!s}"
" : source.python, string.quoted.double.python
normal : source.python, string.quoted.double.python
{{ : constant.character.format.python, source.python, string.quoted.double.python
normal : source.python, string.quoted.double.python
}} : constant.character.format.python, source.python, string.quoted.double.python
normal : source.python, string.quoted.double.python
{fo.__add__ : constant.character.format.python, source.python, string.quoted.double.python
!s : constant.character.format.python, source.python, storage.type.format.python, string.quoted.double.python
} : constant.character.format.python, source.python, string.quoted.double.python
" : source.python, string.quoted.double.python
|
"normal {{ normal }} normal {fo.__add__!s}".format(fo=1)
" : source.python, string.quoted.double.python
normal : source.python, string.quoted.double.python
{{ : constant.character.format.python, source.python, string.quoted.double.python
normal : source.python, string.quoted.double.python
}} : constant.character.format.python, source.python, string.quoted.double.python
normal : source.python, string.quoted.double.python
{fo.__add__ : constant.character.format.python, source.python, string.quoted.double.python
!s : constant.character.format.python, source.python, storage.type.format.python, string.quoted.double.python
} : constant.character.format.python, source.python, string.quoted.double.python
" : source.python, string.quoted.double.python
. : source.python
format : meta.function-call.python, source.python
( : meta.function-call.arguments.python, meta.function-call.python, punctuation.definition.arguments.begin.python, source.python
fo : meta.function-call.arguments.python, meta.function-call.python, source.python, variable.parameter.function-call.pyhton
= : keyword.operator.assignment.python, meta.function-call.arguments.python, meta.function-call.python, source.python
1 : constant.numeric.dec.python, meta.function-call.arguments.python, meta.function-call.python, source.python
) : meta.function-call.python, punctuation.definition.arguments.end.python, source.python
|
Add a test for .format() method
|
Add a test for .format() method
|
Python
|
mit
|
MagicStack/MagicPython,MagicStack/MagicPython,MagicStack/MagicPython
|
---
+++
@@ -1,4 +1,4 @@
-"normal {{ normal }} normal {fo.__add__!s}"
+"normal {{ normal }} normal {fo.__add__!s}".format(fo=1)
" : source.python, string.quoted.double.python
@@ -11,3 +11,10 @@
!s : constant.character.format.python, source.python, storage.type.format.python, string.quoted.double.python
} : constant.character.format.python, source.python, string.quoted.double.python
" : source.python, string.quoted.double.python
+. : source.python
+format : meta.function-call.python, source.python
+( : meta.function-call.arguments.python, meta.function-call.python, punctuation.definition.arguments.begin.python, source.python
+fo : meta.function-call.arguments.python, meta.function-call.python, source.python, variable.parameter.function-call.pyhton
+= : keyword.operator.assignment.python, meta.function-call.arguments.python, meta.function-call.python, source.python
+1 : constant.numeric.dec.python, meta.function-call.arguments.python, meta.function-call.python, source.python
+) : meta.function-call.python, punctuation.definition.arguments.end.python, source.python
|
6612f0e3cd98a037f8b441cba1b3defc46977d66
|
tests/test_structure_check.py
|
tests/test_structure_check.py
|
import pytest
from datatyping.datatyping import validate
def test_empty():
with pytest.raises(TypeError):
assert validate([], ()) is None
def test_empty_reversed():
with pytest.raises(TypeError):
assert validate((), []) is None
def test_plain():
with pytest.raises(TypeError):
assert validate([int], (1, 2, 3, 4, 5)) is None
def test_plain_reversed():
with pytest.raises(TypeError):
assert validate((int, ), [1, 2, 3, 4, 5]) is None
from types import SimpleNamespace
def test_mapping_empty():
with pytest.raises(TypeError):
assert validate([dict], [SimpleNamespace(),
SimpleNamespace(), SimpleNamespace()]) is None
def test_mapping_empty_reversed():
with pytest.raises(TypeError):
assert validate([SimpleNamespace], [{}, {}, {}]) is None
def test_dict_nested():
with pytest.raises(TypeError):
assert validate([{'a': {'b': [dict]}}],
[
{'a': {'b': [{}, SimpleNamespace()]}},
{'a': {'b': [{'any': 'key'}, {'used': 'here'}]}},
]) is None
|
from collections import OrderedDict
import pytest
from hypothesis import given
from hypothesis.strategies import lists, tuples, integers, dictionaries, \
fixed_dictionaries
from datatyping.datatyping import validate
@given(lst=lists(integers()), tpl=tuples(integers()))
def test_different_sequences(lst, tpl):
with pytest.raises(TypeError):
if tpl:
validate([int], tpl)
else:
validate([], tpl)
with pytest.raises(TypeError):
if lst:
validate((int), lst)
else:
validate((), lst)
@given(dct=dictionaries(integers(), integers()))
def test_different_mappings(dct):
with pytest.raises(TypeError):
validate(dict, OrderedDict(dct))
validate(OrderedDict, dct)
@given(lst=lists(
fixed_dictionaries({
'a': fixed_dictionaries({
'b': lists(
dictionaries(integers(), integers(), min_size=1),
min_size=1
)
})
}),
min_size=1
))
def test_dict_nested(lst):
assert validate([{'a': {'b': [dict]}}], lst) is None
|
Rewrite structure_check tests with hypothesis
|
Rewrite structure_check tests with hypothesis
`OrderedDict` chosen instead of `SimpleNamespace` as the former supports the dictionary-like item access, unlike the latter.
|
Python
|
mit
|
Zaab1t/datatyping
|
---
+++
@@ -1,45 +1,45 @@
+from collections import OrderedDict
+
import pytest
+from hypothesis import given
+from hypothesis.strategies import lists, tuples, integers, dictionaries, \
+ fixed_dictionaries
+
from datatyping.datatyping import validate
-def test_empty():
+@given(lst=lists(integers()), tpl=tuples(integers()))
+def test_different_sequences(lst, tpl):
with pytest.raises(TypeError):
- assert validate([], ()) is None
+ if tpl:
+ validate([int], tpl)
+ else:
+ validate([], tpl)
+
+ with pytest.raises(TypeError):
+ if lst:
+ validate((int), lst)
+ else:
+ validate((), lst)
-def test_empty_reversed():
+@given(dct=dictionaries(integers(), integers()))
+def test_different_mappings(dct):
with pytest.raises(TypeError):
- assert validate((), []) is None
+ validate(dict, OrderedDict(dct))
+ validate(OrderedDict, dct)
-def test_plain():
- with pytest.raises(TypeError):
- assert validate([int], (1, 2, 3, 4, 5)) is None
-
-
-def test_plain_reversed():
- with pytest.raises(TypeError):
- assert validate((int, ), [1, 2, 3, 4, 5]) is None
-
-
-from types import SimpleNamespace
-
-
-def test_mapping_empty():
- with pytest.raises(TypeError):
- assert validate([dict], [SimpleNamespace(),
- SimpleNamespace(), SimpleNamespace()]) is None
-
-
-def test_mapping_empty_reversed():
- with pytest.raises(TypeError):
- assert validate([SimpleNamespace], [{}, {}, {}]) is None
-
-
-def test_dict_nested():
- with pytest.raises(TypeError):
- assert validate([{'a': {'b': [dict]}}],
- [
- {'a': {'b': [{}, SimpleNamespace()]}},
- {'a': {'b': [{'any': 'key'}, {'used': 'here'}]}},
- ]) is None
+@given(lst=lists(
+ fixed_dictionaries({
+ 'a': fixed_dictionaries({
+ 'b': lists(
+ dictionaries(integers(), integers(), min_size=1),
+ min_size=1
+ )
+ })
+ }),
+ min_size=1
+))
+def test_dict_nested(lst):
+ assert validate([{'a': {'b': [dict]}}], lst) is None
|
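Pattern in isolation: @given draws many inputs per test run from the declared strategies, so one property test replaces the hand-written empty/plain/reversed cases. A self-contained example independent of datatyping:

from hypothesis import given
from hypothesis.strategies import integers, lists

@given(lst=lists(integers()))
def test_sorting_is_idempotent(lst):
    once = sorted(lst)
    assert sorted(once) == once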
9f7bc70713dfc5864841b9f90fe2ec4bbd406b8d
|
kay/models.py
|
kay/models.py
|
# -*- coding: utf-8 -*-
"""
kay.models
:Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp> All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from google.appengine.ext import db
from kay.utils import crypto
class NamedModel(db.Model):
""" This base model has a classmethod for automatically asigning a
new uuid for its key_name on creation of a new entity.
"""
@classmethod
def create_new_entity(cls, **kwargs):
def txn():
uuid = crypto.new_iid()
if kwargs.has_key('parent'):
entity = cls.get_by_key_name(uuid, parent=kwargs['parent'])
else:
entity = cls.get_by_key_name(uuid)
while entity is not None:
uuid = crypto.new_iid()
if kwargs.has_key('parent'):
entity = cls.get_by_key_name(uuid, parent=kwargs['parent'])
else:
entity = cls.get_by_key_name(uuid)
entity = cls(key_name=uuid, **kwargs)
entity.put()
return entity
return db.run_in_transaction(txn)
|
# -*- coding: utf-8 -*-
"""
kay.models
:Copyright: (c) 2009 Takashi Matsuo <tmatsuo@candit.jp> All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from google.appengine.ext import db
from kay.utils import crypto
class NamedModel(db.Model):
""" This base model has a classmethod for automatically asigning a
new uuid for its key_name on creation of a new entity.
"""
@classmethod
def get_key_generator(cls):
while 1:
yield crypto.new_iid()
@classmethod
def create_new_entity(cls, **kwargs):
key_generator = cls.get_key_generator()
first_key_name = key_generator.next()
def txn():
key_name = first_key_name
if kwargs.has_key('parent'):
entity = cls.get_by_key_name(key_name, parent=kwargs['parent'])
else:
entity = cls.get_by_key_name(key_name)
while entity is not None:
key_name = key_negerator.next()
if kwargs.has_key('parent'):
entity = cls.get_by_key_name(key_name, parent=kwargs['parent'])
else:
entity = cls.get_by_key_name(key_name)
entity = cls(key_name=key_name, **kwargs)
entity.put()
return entity
return db.run_in_transaction(txn)
|
Allow replacing get_key_generator class method in subclasses.
|
Allow replacing get_key_generator class method in subclasses.
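A hypothetical subclass (not part of this commit) sketches what the hook allows; it assumes the NamedModel above and keeps the codebase's Python 2 idioms:

class SequentialModel(NamedModel):
  @classmethod
  def get_key_generator(cls):
    # deterministic key_names instead of random uuids
    counter = 0
    while 1:
      counter += 1
      yield 'entity-%06d' % counter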
|
Python
|
bsd-3-clause
|
Letractively/kay-framework,Letractively/kay-framework,Letractively/kay-framework,Letractively/kay-framework
|
---
+++
@@ -16,20 +16,27 @@
new uuid for its key_name on creation of a new entity.
"""
@classmethod
+ def get_key_generator(cls):
+ while 1:
+ yield crypto.new_iid()
+
+ @classmethod
def create_new_entity(cls, **kwargs):
+ key_generator = cls.get_key_generator()
+ first_key_name = key_generator.next()
def txn():
- uuid = crypto.new_iid()
+ key_name = first_key_name
if kwargs.has_key('parent'):
- entity = cls.get_by_key_name(uuid, parent=kwargs['parent'])
+ entity = cls.get_by_key_name(key_name, parent=kwargs['parent'])
else:
- entity = cls.get_by_key_name(uuid)
+ entity = cls.get_by_key_name(key_name)
while entity is not None:
- uuid = crypto.new_iid()
+        key_name = key_generator.next()
if kwargs.has_key('parent'):
- entity = cls.get_by_key_name(uuid, parent=kwargs['parent'])
+ entity = cls.get_by_key_name(key_name, parent=kwargs['parent'])
else:
- entity = cls.get_by_key_name(uuid)
- entity = cls(key_name=uuid, **kwargs)
+ entity = cls.get_by_key_name(key_name)
+ entity = cls(key_name=key_name, **kwargs)
entity.put()
return entity
return db.run_in_transaction(txn)
|
4a7ca3439c9ad8368849accc25bbb554daae1940
|
knights/dj.py
|
knights/dj.py
|
from collections import defaultdict
from django.template.base import TemplateDoesNotExist, TemplateSyntaxError # NOQA
from django.template.backends.base import BaseEngine
from django.template.backends.utils import csrf_input_lazy, csrf_token_lazy
from . import compiler
from . import loader
class KnightsTemplater(BaseEngine):
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(KnightsTemplater, self).__init__(params)
def from_string(self, template_code):
tmpl = compiler.kompile(template_code)
return Template(tmpl)
def get_template(self, template_name):
try:
tmpl = loader.load_template(template_name, self.template_dirs)
except loader.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
except Exception as e:
raise TemplateSyntaxError(e).with_traceback(e.__traceback__)
return Template(tmpl)
class Template(object):
def __init__(self, template):
self.template = template
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['user'] = request.user
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
ctx = defaultdict(str)
ctx.update(context)
return self.template(ctx)
|
from collections import defaultdict
from django.template.base import TemplateDoesNotExist, TemplateSyntaxError # NOQA
from django.template.backends.base import BaseEngine
from django.template.backends.utils import csrf_input_lazy, csrf_token_lazy
from . import compiler
from . import loader
class KnightsTemplater(BaseEngine):
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(KnightsTemplater, self).__init__(params)
for path in self.template_dirs:
loader.add_path(path)
def from_string(self, template_code):
tmpl = compiler.kompile(template_code)
return Template(tmpl)
def get_template(self, template_name):
try:
tmpl = loader.load_template(template_name)
except loader.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
except Exception as e:
raise TemplateSyntaxError(e).with_traceback(e.__traceback__)
return Template(tmpl)
class Template(object):
def __init__(self, template):
self.template = template
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['user'] = request.user
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
ctx = defaultdict(str)
ctx.update(context)
return self.template(ctx)
|
Add all the paths so includes/extends work
|
Add all the paths so includes/extends work
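For context, a rough sketch of the loader contract this relies on; add_path and load_template are the names used in the diff, but the bodies below are assumptions, not the real knights implementation:

import os

_paths = []

class TemplateNotFound(Exception):
    pass

def add_path(path):
    _paths.append(path)

def load_template(name):
    # search every registered directory so includes/extends resolve
    for base in _paths:
        candidate = os.path.join(base, name)
        if os.path.exists(candidate):
            with open(candidate) as fh:
                return fh.read()
    raise TemplateNotFound(name)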
|
Python
|
mit
|
funkybob/knights-templater,funkybob/knights-templater
|
---
+++
@@ -16,13 +16,16 @@
super(KnightsTemplater, self).__init__(params)
+ for path in self.template_dirs:
+ loader.add_path(path)
+
def from_string(self, template_code):
tmpl = compiler.kompile(template_code)
return Template(tmpl)
def get_template(self, template_name):
try:
- tmpl = loader.load_template(template_name, self.template_dirs)
+ tmpl = loader.load_template(template_name)
except loader.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
except Exception as e:
|
d9e12128d1fca7069275fc9e669e1176cc1837f6
|
organization/projects/views.py
|
organization/projects/views.py
|
from django.shortcuts import render
from organization.projects.models import *
from organization.core.views import *
class ProjectListView(ListView):
model = Project
template_name='project/project_list.html'
class ProjectDetailView(SlugMixin, DetailView):
model = Project
template_name='project/project_detail.html'
context_object_name = 'project'
|
from django.shortcuts import render
from organization.projects.models import *
from organization.core.views import *
class ProjectListView(ListView):
model = Project
template_name='projects/project_list.html'
class ProjectDetailView(SlugMixin, DetailView):
model = Project
template_name='project/project_detail.html'
context_object_name = 'project'
|
Fix project list template path
|
Fix project list template path
|
Python
|
agpl-3.0
|
Ircam-Web/mezzanine-organization,Ircam-Web/mezzanine-organization
|
---
+++
@@ -7,7 +7,7 @@
class ProjectListView(ListView):
model = Project
- template_name='project/project_list.html'
+ template_name='projects/project_list.html'
class ProjectDetailView(SlugMixin, DetailView):
|
e2dc271f20e6115f54ab4aac123f6790fcc7764c
|
backend/start.py
|
backend/start.py
|
#!/usr/bin/python
# coding: utf-8
import sys
import signal
import logging
import argparse
from lib.DbConnector import DbConnector
from lib.Acquisition import Acquisition
from lib.SystemMonitor import SystemMonitor
acq = Acquisition()
sm = SystemMonitor()
def signalHandler(signal, frame):
logging.warning("Caught ^C")
acq.Stop()
sm.Stop()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = 'Start acquisition'
)
parser.add_argument("-v", "--verbose", help="Verbositiy setting",
action="count", default=0)
args = parser.parse_args()
# configure logging
if args.verbose == 1:
wLogLevel = logging.WARNING
elif args.verbose == 2:
wLogLevel = logging.INFO
elif args.verbose >= 3:
wLogLevel = logging.DEBUG
else:
wLogLevel = logging.ERROR
logging.basicConfig(level=wLogLevel)
logging.info('Started')
# signal handling
signal.signal(signal.SIGINT, signalHandler)
# start things
with DbConnector() as wDb:
wDb.Init()
sm.start()
acq.GetData()
# exit things
sys.exit(0)
|
#!/usr/bin/python
# coding: utf-8
import sys
import signal
import logging
import argparse
from lib.DbConnector import DbConnector
from lib.Acquisition import Acquisition
from lib.SystemMonitor import SystemMonitor
acq = Acquisition()
sm = SystemMonitor()
def signalHandler(signal, frame):
logging.warning("Caught ^C")
acq.Stop()
sm.Stop()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = 'Start acquisition'
)
parser.add_argument("-v", "--verbose", help="Verbositiy setting",
action="count", default=0)
args = parser.parse_args()
# configure logging
if args.verbose == 1:
wLogLevel = logging.WARNING
elif args.verbose == 2:
wLogLevel = logging.INFO
elif args.verbose >= 3:
wLogLevel = logging.DEBUG
else:
wLogLevel = logging.ERROR
logging.basicConfig(level=wLogLevel)
logging.info('Started')
# signal handling
signal.signal(signal.SIGINT, signalHandler)
# start things
with DbConnector() as wDb:
wDb.Init()
sm.start()
acq.GetData()
# exit things
sys.exit(0)
|
Fix end of line format
|
Fix end of line format
|
Python
|
mit
|
ftoulemon/Dilebhome,ftoulemon/Dilebhome,ftoulemon/Dilebhome,ftoulemon/Dilebhome
| |
1baa04b3f47c92a14c61e7bbb6b32dc35dd51f5d
|
chatroom/views.py
|
chatroom/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
def index(request):
return render(request, 'index.html')
def append(request):
# open("data", "a").write(str(request.args.get("msg")) + "\n\r")
open("data", "a").write(request.GET['msg'] + "\n\r")
return HttpResponse("")
def retreive(request):
return HttpResponse(open("data").read())
def faq(request):
return render(request, 'faq.html')
def about_us(request):
return render(request, 'about_us.html')
def exchange(request):
return render(request, 'exchange.html')
def chatroom(request):
return render(request, 'chatroom.html')
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
def index(request):
return render(request, 'index.html')
def append(request):
# open("data", "a").write(str(request.args.get("msg")) + "\n\r")
open("/tmp/data", "ab").write(request.GET['msg'].encode('utf8') + "\n\r".encode('utf-8'))
return HttpResponse("")
def retreive(request):
fil = open("/tmp/data", "rb")
payload = fil.read()
return HttpResponse(payload)
def faq(request):
return render(request, 'faq.html')
def about_us(request):
return render(request, 'about_us.html')
def exchange(request):
return render(request, 'exchange.html')
def chatroom(request):
return render(request, 'chatroom.html')
|
Fix encoding bugs on production server
|
Fix encoding bugs on production server
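The underlying Python 3 pitfall, shown with toy values rather than the commit's real traffic: concatenating str with bytes raises TypeError, so both pieces are encoded before the binary write:

msg = "héllo"                                          # str
payload = msg.encode('utf8') + "\n\r".encode('utf-8')  # bytes + bytes
with open("/tmp/data", "ab") as fh:                    # path as in the commit
    fh.write(payload)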
|
Python
|
mit
|
sonicyang/chiphub,sonicyang/chiphub,sonicyang/chiphub
|
---
+++
@@ -7,11 +7,13 @@
def append(request):
# open("data", "a").write(str(request.args.get("msg")) + "\n\r")
- open("data", "a").write(request.GET['msg'] + "\n\r")
+ open("/tmp/data", "ab").write(request.GET['msg'].encode('utf8') + "\n\r".encode('utf-8'))
return HttpResponse("")
def retreive(request):
- return HttpResponse(open("data").read())
+ fil = open("/tmp/data", "rb")
+ payload = fil.read()
+ return HttpResponse(payload)
def faq(request):
return render(request, 'faq.html')
|
7da00e458525302b009da35d3410dcaccf55fa94
|
booksite/views.py
|
booksite/views.py
|
from django.shortcuts import render
from .models import Tale
def tale_list(request):
tale_list = Tale.objects.all()
return render(request, 'booksite/index.html', {'tale_list' : tale_list})
def create_book(request, tale_id):
tale=Tale.objects.get(id=tale_id)
return render(request, 'booksite/create_book.html', {"tale":tale})
|
from django.shortcuts import render
from .models import Tale
def tale_list(request):
tale_list = Tale.objects.all()
return render(request, 'booksite/index.html', {'tale_list' : tale_list})
def create_tale(request, tale_id):
tale=Tale.objects.get(id=tale_id)
return render(request, 'booksite/create_tale.html', {"tale":tale})
|
Rename links from *book* to *tale*
|
Rename links from *book* to *tale*
|
Python
|
apache-2.0
|
mark-graciov/bookit,mark-graciov/bookit
|
---
+++
@@ -8,7 +8,7 @@
return render(request, 'booksite/index.html', {'tale_list' : tale_list})
-def create_book(request, tale_id):
+def create_tale(request, tale_id):
tale=Tale.objects.get(id=tale_id)
- return render(request, 'booksite/create_book.html', {"tale":tale})
+ return render(request, 'booksite/create_tale.html', {"tale":tale})
|
1df25ada51d0be794f2d689161b1c93e81512d3b
|
students/psbriant/final_project/clean_data.py
|
students/psbriant/final_project/clean_data.py
|
"""
Name: Paul Briant
Date: 12/11/16
Class: Introduction to Python
Assignment: Final Project
Description:
Code for Final Project
"""
import pandas
from datetime import datetime
def clean(data):
"""
Take in data and return cleaned version.
"""
# Remove Date Values column
data = data.drop(["Date Value"], axis=1)
# Determine what values are missing
# empty = data.apply(lambda col: pandas.isnull(col))
def main():
"""
"""
# Connect to file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")
clean(data)
if __name__ == '__main__':
main()
|
"""
Name: Paul Briant
Date: 12/11/16
Class: Introduction to Python
Assignment: Final Project
Description:
Code for Final Project
"""
import pandas
from datetime import datetime
def clean(data):
"""
Take in data and return cleaned version.
"""
# Remove Date Values column
data = data.drop(["Date Value"], axis=1)
# Determine what values are missing
# empty = data.apply(lambda col: pandas.isnull(col))
return data
def find_low_water_use(data):
"""
"""
under100 = data[(data["90012"] <= 100) & (data["90013"] <= 100)]
print(under100)
under25 = data[(data["90012"] <= 25) & (data["90013"] <= 25)]
print(under25)
def main():
"""
"""
# Connect to file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")
cleaned_data = clean(data)
find_low_water_use(cleaned_data)
if __name__ == '__main__':
main()
|
Add filter for water use in downtown LA zipcodes.
|
Add filter for water use in downtown LA zipcodes.
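A toy illustration of the boolean-mask filtering used in find_low_water_use; the numbers are fabricated, only the column names mirror the zip codes above:

import pandas

data = pandas.DataFrame({"90012": [20, 150, 80], "90013": [10, 90, 60]})
# keep only rows where both downtown zip codes are at or under 100
under100 = data[(data["90012"] <= 100) & (data["90013"] <= 100)]
print(under100)  # rows 0 and 2 survive the mask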
|
Python
|
unlicense
|
UWPCE-PythonCert/IntroPython2016,weidnem/IntroPython2016,UWPCE-PythonCert/IntroPython2016,weidnem/IntroPython2016,UWPCE-PythonCert/IntroPython2016,weidnem/IntroPython2016
|
---
+++
@@ -22,6 +22,18 @@
# Determine what values are missing
# empty = data.apply(lambda col: pandas.isnull(col))
+ return data
+
+
+def find_low_water_use(data):
+ """
+    Print rows where both downtown zip codes report low water use.
+ """
+ under100 = data[(data["90012"] <= 100) & (data["90013"] <= 100)]
+ print(under100)
+ under25 = data[(data["90012"] <= 25) & (data["90013"] <= 25)]
+ print(under25)
+
def main():
"""
@@ -30,7 +42,8 @@
# Connect to file.
data = pandas.read_csv("data/Residential_Water_Usage_Zip_Code_on_Top.csv")
- clean(data)
+ cleaned_data = clean(data)
+ find_low_water_use(cleaned_data)
if __name__ == '__main__':
|
6451808c2dfb3d207bdd69c8aa138554f52cf5ba
|
python/common-child.py
|
python/common-child.py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# See https://en.wikipedia.org/wiki/Longest_common_subsequence_problem
def commonChild(s1, s2):
matrix = [[0 for i in range(len(s2) + 1)] for j in range(len(s1)+ 1)]
for row_i in range(len(s1)):
for col_i in range(len(s2)):
if s1[row_i] == s2[col_i]:
matrix[row_i + 1][col_i + 1] = matrix[row_i][col_i] + 1
else:
matrix[row_i + 1][col_i + 1] = max(matrix[row_i+1][col_i], matrix[row_i][col_i + 1])
return matrix[len(s1)][len(s2)]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s1 = input()
s2 = input()
result = commonChild(s1, s2)
fptr.write(str(result) + '\n')
fptr.close()
|
#!/bin/python3
import math
import os
import random
import re
import sys
# See https://en.wikipedia.org/wiki/Longest_common_subsequence_problem
# This solution creates the matrix described in "Traceback approach"
def common_child(s1, s2):
matrix = [[0 for i in range(len(s2) + 1)] for j in range(len(s1)+ 1)]
for row_i in range(len(s1)):
for col_i in range(len(s2)):
if s1[row_i] == s2[col_i]:
matrix[row_i + 1][col_i + 1] = matrix[row_i][col_i] + 1
else:
matrix[row_i + 1][col_i + 1] = max(matrix[row_i+1][col_i], matrix[row_i][col_i + 1])
return matrix[len(s1)][len(s2)]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s1 = input()
s2 = input()
result = common_child(s1, s2)
fptr.write(str(result) + '\n')
fptr.close()
|
Include dev comment on solution
|
Include dev comment on solution
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
---
+++
@@ -7,7 +7,8 @@
import sys
# See https://en.wikipedia.org/wiki/Longest_common_subsequence_problem
-def commonChild(s1, s2):
+# This solution creates the matrix described in "Traceback approach"
+def common_child(s1, s2):
matrix = [[0 for i in range(len(s2) + 1)] for j in range(len(s1)+ 1)]
for row_i in range(len(s1)):
@@ -19,13 +20,10 @@
return matrix[len(s1)][len(s2)]
-
-
-
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s1 = input()
s2 = input()
- result = commonChild(s1, s2)
+ result = common_child(s1, s2)
fptr.write(str(result) + '\n')
fptr.close()
|
90fe4c98b5e93058c6cfd090958922070351a04d
|
quicksort/quicksort.py
|
quicksort/quicksort.py
|
def sort(arr, length):
if length == 1:
return
pivot = choose_pivot(arr, length)
return (arr, length, pivot)
def choose_pivot(arr, length):
return arr[0]
if __name__ == '__main__':
unsorted = list(reversed(range(1000)))
initial_len = len(unsorted)
print sort(unsorted, initial_len)
|
from random import randint
def sort(arr, start, length):
if length <= 1:
return arr
pivot = choose_pivot(arr, length)
i = j = start + 1
while j < length:
if arr[j] < pivot:
swap(arr, j, i)
i += 1
j += 1
swap(arr, start, i-1)
return (arr, length, pivot)
def swap(arr, x, y):
temp = arr[x]
arr[x] = arr[y]
arr[y] = temp
def choose_pivot(arr, length):
return arr[0]
if __name__ == '__main__':
unsorted = [randint(0, 100) for n in range(100)]
print sort(unsorted, 0, len(unsorted)-1)
|
Add partition step and swap helper function
|
Add partition step and swap helper function
The data is partitioned iterating over the array and moving any
element less than the pivot value to the left part of the array.
This is done using an additional variable i that represents the
index of the smallest value greater than the pivot - any value
less than the pivot is swapped at the value at this index, which
is then incremented.
This version still uses a naive choice of pivot.
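A standalone sketch of that partition step (first element as pivot, i marking the boundary between the two regions), separate from the commit's own code:

def partition(arr, start, end):
    pivot = arr[start]
    i = start + 1
    for j in range(start + 1, end + 1):
        if arr[j] < pivot:
            arr[i], arr[j] = arr[j], arr[i]          # move small value left
            i += 1
    arr[start], arr[i - 1] = arr[i - 1], arr[start]  # put pivot in place
    return i - 1                                     # final pivot index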
|
Python
|
mit
|
timpel/stanford-algs,timpel/stanford-algs
|
---
+++
@@ -1,11 +1,28 @@
-def sort(arr, length):
-
- if length == 1:
- return
+from random import randint
+
+def sort(arr, start, length):
+
+ if length <= 1:
+ return arr
pivot = choose_pivot(arr, length)
+ i = j = start + 1
+
+ while j < length:
+ if arr[j] < pivot:
+ swap(arr, j, i)
+ i += 1
+ j += 1
+
+ swap(arr, start, i-1)
return (arr, length, pivot)
+
+
+def swap(arr, x, y):
+ temp = arr[x]
+ arr[x] = arr[y]
+ arr[y] = temp
def choose_pivot(arr, length):
@@ -15,7 +32,5 @@
if __name__ == '__main__':
- unsorted = list(reversed(range(1000)))
- initial_len = len(unsorted)
-
- print sort(unsorted, initial_len)
+ unsorted = [randint(0, 100) for n in range(100)]
+ print sort(unsorted, 0, len(unsorted)-1)
|
9a86ba51893b6f03d3ffde12ec2f331339ddd0f1
|
UI/client_config.py
|
UI/client_config.py
|
from PyQt4 import QtCore, QtGui
from utilities.backend_config import Configuration
from qt_interfaces.settings_ui_new import Ui_ClientConfiguration
from utilities.log_manager import logger
# Configuration Ui section
class ClientConfigurationUI(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
# register UI
self.client_configuration_ui = Ui_ClientConfiguration()
self.client_configuration_ui.setupUi(self)
self.configuration_manager = Configuration()
QtCore.QObject.connect(self.client_configuration_ui.apply_bt, QtCore.SIGNAL("clicked()"),
self.save_settings) # valudate and register user
def save_settings(self):
# validate settings
self.configuration_manager.save_client_configuration(self.client_configuration_ui) # save configuration
def reset_settings_to_default(self):
logger.debug(1)
|
from PyQt4 import QtCore, QtGui
from utilities.backend_config import Configuration
from qt_interfaces.settings_ui_new import Ui_ClientConfiguration
from utilities.log_manager import logger
# Configuration Ui section
class ClientConfigurationUI(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
# register UI
self.client_configuration_ui = Ui_ClientConfiguration()
self.client_configuration_ui.setupUi(self)
self.configuration_manager = Configuration()
QtCore.QObject.connect(self.client_configuration_ui.apply_bt, QtCore.SIGNAL("clicked()"),
self.save_settings) # save settings action
QtCore.QObject.connect(self.client_configuration_ui.cancel_bt, QtCore.SIGNAL("clicked()"),
self.close) # close form
def save_settings(self):
# validate settings
self.configuration_manager.save_client_configuration(self.client_configuration_ui) # save configuration
QtGui.QMessageBox.about(self, "Success", "Configuration saved successfully!")
def reset_settings_to_default(self):
logger.debug(1)
|
Add some actions to settings
|
Add some actions to settings
|
Python
|
mit
|
lakewik/storj-gui-client
|
---
+++
@@ -17,12 +17,18 @@
self.configuration_manager = Configuration()
QtCore.QObject.connect(self.client_configuration_ui.apply_bt, QtCore.SIGNAL("clicked()"),
- self.save_settings) # valudate and register user
+ self.save_settings) # save settings action
+
+ QtCore.QObject.connect(self.client_configuration_ui.cancel_bt, QtCore.SIGNAL("clicked()"),
+ self.close) # close form
def save_settings(self):
# validate settings
self.configuration_manager.save_client_configuration(self.client_configuration_ui) # save configuration
+ QtGui.QMessageBox.about(self, "Success", "Configuration saved successfully!")
+
+
def reset_settings_to_default(self):
logger.debug(1)
|
9d79893f119d696ead124d9e34b21acf34cd6f8f
|
pygotham/admin/schedule.py
|
pygotham/admin/schedule.py
|
"""Admin for schedule-related models."""
from pygotham.admin.utils import model_view
from pygotham.schedule import models
# This line is really long because pep257 needs it to be on one line.
__all__ = ('DayModelView', 'RoomModelView', 'SlotModelView', 'PresentationModelView')
CATEGORY = 'Schedule'
DayModelView = model_view(
models.Day,
'Days',
CATEGORY,
column_default_sort='date',
column_list=('date', 'event'),
form_columns=('event', 'date'),
)
RoomModelView = model_view(
models.Room,
'Rooms',
CATEGORY,
column_default_sort='order',
form_columns=('name', 'order'),
)
SlotModelView = model_view(
models.Slot,
'Slots',
CATEGORY,
column_list=('day', 'rooms', 'kind', 'start', 'end'),
form_columns=('day', 'rooms', 'kind', 'start', 'end', 'content_override'),
)
PresentationModelView = model_view(
models.Presentation,
'Presentations',
CATEGORY,
)
|
"""Admin for schedule-related models."""
from pygotham.admin.utils import model_view
from pygotham.schedule import models
# This line is really long because pep257 needs it to be on one line.
__all__ = ('DayModelView', 'RoomModelView', 'SlotModelView', 'PresentationModelView')
CATEGORY = 'Schedule'
DayModelView = model_view(
models.Day,
'Days',
CATEGORY,
column_default_sort='date',
column_list=('date', 'event'),
form_columns=('event', 'date'),
)
RoomModelView = model_view(
models.Room,
'Rooms',
CATEGORY,
column_default_sort='order',
form_columns=('name', 'order'),
)
SlotModelView = model_view(
models.Slot,
'Slots',
CATEGORY,
column_default_sort='start',
column_list=('day', 'rooms', 'kind', 'start', 'end'),
form_columns=('day', 'rooms', 'kind', 'start', 'end', 'content_override'),
)
PresentationModelView = model_view(
models.Presentation,
'Presentations',
CATEGORY,
)
|
Change admin sort for slots
|
Change admin sort for slots
|
Python
|
bsd-3-clause
|
pathunstrom/pygotham,PyGotham/pygotham,djds23/pygotham-1,PyGotham/pygotham,djds23/pygotham-1,djds23/pygotham-1,PyGotham/pygotham,pathunstrom/pygotham,PyGotham/pygotham,djds23/pygotham-1,pathunstrom/pygotham,pathunstrom/pygotham,djds23/pygotham-1,pathunstrom/pygotham,PyGotham/pygotham
|
---
+++
@@ -32,6 +32,7 @@
models.Slot,
'Slots',
CATEGORY,
+ column_default_sort='start',
column_list=('day', 'rooms', 'kind', 'start', 'end'),
form_columns=('day', 'rooms', 'kind', 'start', 'end', 'content_override'),
)
|
d1969d6bc016b0e3dc66df7eeb30a9c76debc6b6
|
tests/test_async_eventlet.py
|
tests/test_async_eventlet.py
|
import logging
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import async_eventlet
class TestServer(unittest.TestCase):
def setUp(self):
logging.getLogger('engineio').setLevel(logging.NOTSET)
@mock.patch('engineio.async_eventlet._WebSocketWSGI.__call__',
return_value='data')
def test_wsgi_call(self, _WebSocketWSGI):
_WebSocketWSGI.__call__ = lambda e,s: 'data'
environ = {"eventlet.input": None}
start_response = "bar"
wsgi = async_eventlet.WebSocketWSGI(None)
self.assertEqual(wsgi(environ, start_response), 'data')
|
import logging
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import async_eventlet
class TestServer(unittest.TestCase):
def setUp(self):
logging.getLogger('engineio').setLevel(logging.NOTSET)
@mock.patch('engineio.async_eventlet._WebSocketWSGI.__call__',
return_value='data')
def test_wsgi_call(self, _WebSocketWSGI):
_WebSocketWSGI.__call__ = lambda e, s: 'data'
environ = {"eventlet.input": None}
start_response = "bar"
wsgi = async_eventlet.WebSocketWSGI(None)
self.assertEqual(wsgi(environ, start_response), 'data')
|
Update tests to correspond with flake8
|
Update tests to correspond with flake8
|
Python
|
mit
|
miguelgrinberg/python-engineio,miguelgrinberg/python-engineio,miguelgrinberg/python-engineio
|
---
+++
@@ -17,9 +17,8 @@
@mock.patch('engineio.async_eventlet._WebSocketWSGI.__call__',
return_value='data')
def test_wsgi_call(self, _WebSocketWSGI):
- _WebSocketWSGI.__call__ = lambda e,s: 'data'
+ _WebSocketWSGI.__call__ = lambda e, s: 'data'
environ = {"eventlet.input": None}
start_response = "bar"
wsgi = async_eventlet.WebSocketWSGI(None)
self.assertEqual(wsgi(environ, start_response), 'data')
-
|
8b076747c756bc4fe488f3b2f5a0265b7fd880f0
|
matrix/matrix.py
|
matrix/matrix.py
|
class Matrix(object):
def __init__(self, s):
self.rows = [list(map(int, row.split()))
for row in s.split("\n")]
@property
def columns(self):
return [[row[i] for row in self.rows]
for i in range(len(self.rows[0]))]
|
class Matrix(object):
def __init__(self, s):
self.rows = [list(map(int, row.split()))
for row in s.split("\n")]
@property
def columns(self):
return [list(col) for col in zip(*self.rows)]
|
Use zip for a shorter solution
|
Use zip for a shorter solution
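A quick demonstration of the zip(*rows) transpose trick with made-up values:

rows = [[1, 2, 3],
        [4, 5, 6]]
columns = [list(col) for col in zip(*rows)]
print(columns)  # [[1, 4], [2, 5], [3, 6]]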
|
Python
|
agpl-3.0
|
CubicComet/exercism-python-solutions
|
---
+++
@@ -5,5 +5,4 @@
@property
def columns(self):
- return [[row[i] for row in self.rows]
- for i in range(len(self.rows[0]))]
+ return [list(col) for col in zip(*self.rows)]
|
8d46db626298f2d21f4f1d8b6f75fdc08bd761dc
|
zinnia/models/author.py
|
zinnia/models/author.py
|
"""Author model for Zinnia"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
@python_2_unicode_compatible
class Author(get_user_model()):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
objects = get_user_model()._default_manager
published = EntryRelatedPublishedManager()
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
return ('zinnia_author_detail', [self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return self.get_full_name() or self.get_username()
class Meta:
"""
        Author's meta information.
"""
app_label = 'zinnia'
proxy = True
|
"""Author model for Zinnia"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
class AuthorManagers(models.Model):
published = EntryRelatedPublishedManager()
class Meta:
abstract = True
@python_2_unicode_compatible
class Author(get_user_model(), AuthorManagers):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
return ('zinnia_author_detail', [self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return self.get_full_name() or self.get_username()
class Meta:
"""
        Author's meta information.
"""
app_label = 'zinnia'
proxy = True
|
Move Author Managers into an abstract base class
|
Move Author Managers into an abstract base class
Copying of the default manager causes the source model to become polluted.
To supply additional managers without replacing the default manager,
the Django docs recommend inheriting from an abstract base class.
https://docs.djangoproject.com/en/dev/topics/db/models/#proxy-model-managers
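A generic sketch of that pattern with plain Django models; the names here (Article, ArticleManagers, ArticleProxy) are hypothetical, not Zinnia's:

from django.db import models

class ArticleManagers(models.Model):
    # the extra manager lives on an abstract base, so the proxy keeps
    # the concrete parent's default manager untouched
    published = models.Manager()

    class Meta:
        abstract = True

class Article(models.Model):
    title = models.CharField(max_length=100)

class ArticleProxy(Article, ArticleManagers):
    class Meta:
        proxy = True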
|
Python
|
bsd-3-clause
|
bywbilly/django-blog-zinnia,Zopieux/django-blog-zinnia,petecummings/django-blog-zinnia,ZuluPro/django-blog-zinnia,petecummings/django-blog-zinnia,marctc/django-blog-zinnia,Maplecroft/django-blog-zinnia,petecummings/django-blog-zinnia,Fantomas42/django-blog-zinnia,1844144/django-blog-zinnia,marctc/django-blog-zinnia,Fantomas42/django-blog-zinnia,ghachey/django-blog-zinnia,Zopieux/django-blog-zinnia,1844144/django-blog-zinnia,dapeng0802/django-blog-zinnia,ghachey/django-blog-zinnia,marctc/django-blog-zinnia,ghachey/django-blog-zinnia,bywbilly/django-blog-zinnia,ZuluPro/django-blog-zinnia,ZuluPro/django-blog-zinnia,1844144/django-blog-zinnia,dapeng0802/django-blog-zinnia,extertioner/django-blog-zinnia,Maplecroft/django-blog-zinnia,aorzh/django-blog-zinnia,Maplecroft/django-blog-zinnia,bywbilly/django-blog-zinnia,Fantomas42/django-blog-zinnia,extertioner/django-blog-zinnia,dapeng0802/django-blog-zinnia,Zopieux/django-blog-zinnia,aorzh/django-blog-zinnia,extertioner/django-blog-zinnia,aorzh/django-blog-zinnia
|
---
+++
@@ -6,15 +6,17 @@
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
+class AuthorManagers(models.Model):
+ published = EntryRelatedPublishedManager()
+
+ class Meta:
+ abstract = True
@python_2_unicode_compatible
-class Author(get_user_model()):
+class Author(get_user_model(), AuthorManagers):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
-
- objects = get_user_model()._default_manager
- published = EntryRelatedPublishedManager()
def entries_published(self):
"""
|
dc002c23891ea5d2fe37e059cee6de0381c284cf
|
vumi/transports/dmark/dmark_ussd.py
|
vumi/transports/dmark/dmark_ussd.py
|
|
from vumi.transports.httprpc import HttpRpcTransport
class DmarkUssdTransportConfig(HttpRpcTransport.CONFIG_CLASS):
"""Config for Dmark USSD transport."""
class DmarkUssdTransport(HttpRpcTransport):
"""Dmark USSD transport over HTTP.
When a USSD message is received, Dmark will make an HTTP GET request to
the transport with the following query parameters:
* ``transactionId``: A unique ID for the USSD session (string).
* ``msisdn``: The phone number that the message was sent from (string).
* ``ussdServiceCode``: The USSD Service code the request was made to
(string).
* ``transactionTime``: The time the USSD request was received at Dmark,
as a Unix timestamp (UTC).
    * ``ussdRequestString``: The full content of the USSD request (string).
* ``creationTime``: The time the USSD request was sent, as a Unix
timestamp (UTC), if available. (This time is given by the mobile
network, and may not always be reliable.)
The transport may respond to this request either using JSON or form-encoded
data. A successful response must return HTTP status code 200. Any other
response code is treated as a failure.
This transport responds with JSON encoded data. The JSON response
contains the following keys:
* ``responseString``: The content to be returned to the phone number that
originated the USSD request.
* ``action``: Either ``end`` or ``request``. ``end`` signifies that no
further interaction is expected from the user and the USSD session should
be closed. ``request`` signifies that further interaction is expected.
**Example JSON response**:
.. sourcecode: javascript
{
"responseString": "Hello from Vumi!",
"action": "end"
}
"""
CONFIG_CLASS = DmarkUssdTransportConfig
def handle_raw_inbound_message(self, msgid, request):
raise NotImplementedError("Sub-classes should implement"
" handle_raw_inbound_message.")
def handle_outbound_message(self, message):
self.emit("DmarkUssdTransport consuming %s" % (message))
missing_fields = self.ensure_message_values(message,
['in_reply_to', 'content'])
if missing_fields:
return self.reject_message(message, missing_fields)
else:
self.finish_request(
message.payload['in_reply_to'],
message.payload['content'].encode('utf-8'))
return self.publish_ack(user_message_id=message['message_id'],
sent_message_id=message['message_id'])
|
Document how the Dmark protocol is expected to work.
|
Document how the Dmark protocol is expected to work.
|
Python
|
bsd-3-clause
|
TouK/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,harrissoerja/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,vishwaprakashmishra/xmatrix
|
---
+++
@@ -0,0 +1,66 @@
+
+from vumi.transports.httprpc import HttpRpcTransport
+
+
+class DmarkUssdTransportConfig(HttpRpcTransport.CONFIG_CLASS):
+ """Config for Dmark USSD transport."""
+
+
+class DmarkUssdTransport(HttpRpcTransport):
+ """Dmark USSD transport over HTTP.
+
+ When a USSD message is received, Dmark will make an HTTP GET request to
+ the transport with the following query parameters:
+
+ * ``transactionId``: A unique ID for the USSD session (string).
+ * ``msisdn``: The phone number that the message was sent from (string).
+ * ``ussdServiceCode``: The USSD Service code the request was made to
+ (string).
+ * ``transactionTime``: The time the USSD request was received at Dmark,
+ as a Unix timestamp (UTC).
+    * ``ussdRequestString``: The full content of the USSD request (string).
+ * ``creationTime``: The time the USSD request was sent, as a Unix
+ timestamp (UTC), if available. (This time is given by the mobile
+ network, and may not always be reliable.)
+
+ The transport may respond to this request either using JSON or form-encoded
+ data. A successful response must return HTTP status code 200. Any other
+ response code is treated as a failure.
+
+ This transport responds with JSON encoded data. The JSON response
+ contains the following keys:
+
+ * ``responseString``: The content to be returned to the phone number that
+ originated the USSD request.
+ * ``action``: Either ``end`` or ``request``. ``end`` signifies that no
+ further interaction is expected from the user and the USSD session should
+ be closed. ``request`` signifies that further interaction is expected.
+
+ **Example JSON response**:
+
+ .. sourcecode: javascript
+
+ {
+ "responseString": "Hello from Vumi!",
+ "action": "end"
+ }
+ """
+
+ CONFIG_CLASS = DmarkUssdTransportConfig
+
+ def handle_raw_inbound_message(self, msgid, request):
+ raise NotImplementedError("Sub-classes should implement"
+ " handle_raw_inbound_message.")
+
+ def handle_outbound_message(self, message):
+ self.emit("DmarkUssdTransport consuming %s" % (message))
+ missing_fields = self.ensure_message_values(message,
+ ['in_reply_to', 'content'])
+ if missing_fields:
+ return self.reject_message(message, missing_fields)
+ else:
+ self.finish_request(
+ message.payload['in_reply_to'],
+ message.payload['content'].encode('utf-8'))
+ return self.publish_ack(user_message_id=message['message_id'],
+ sent_message_id=message['message_id'])
|
|
099ab577bf3d03bd5f2d579bbf82a8035690219e
|
tests/test_auth.py
|
tests/test_auth.py
|
"""Unit test module for auth"""
import json
from flask.ext.login import login_user, logout_user
from tests import TestCase, LAST_NAME, FIRST_NAME, TEST_USER_ID
from portal.extensions import db
from portal.models.auth import Client
class TestAuth(TestCase):
def test_client_edit(self):
# Generate a minimal client belonging to test user
client_id = 'test_client'
client = Client(client_id=client_id,
client_secret='tc_secret', user_id=TEST_USER_ID)
db.session.add(client)
db.session.commit()
self.promote_user(role_name='application_developer')
self.login()
rv = self.app.post('/client/{0}'.format(client.client_id),
data=dict(callback_url='http://tryme.com'))
self.assertEquals(rv.status_code, 200)
client = Client.query.get('test_client')
self.assertEquals(client.callback_url, 'http://tryme.com')
|
"""Unit test module for auth"""
import json
from flask.ext.login import login_user, logout_user
from tests import TestCase, LAST_NAME, FIRST_NAME, TEST_USER_ID
from portal.extensions import db
from portal.models.auth import Client
class TestAuth(TestCase):
def test_client_edit(self):
# Generate a minimal client belonging to test user
client_id = 'test_client'
client = Client(client_id=client_id,
client_secret='tc_secret', user_id=TEST_USER_ID)
db.session.add(client)
db.session.commit()
self.promote_user(role_name='application_developer')
self.login()
rv = self.app.post('/client/{0}'.format(client.client_id),
data=dict(callback_url='http://tryme.com'))
client = Client.query.get('test_client')
self.assertEquals(client.callback_url, 'http://tryme.com')
|
Fix test - change in client redirection previously overlooked.
|
Fix test - change in client redirection previously overlooked.
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
---
+++
@@ -20,6 +20,5 @@
rv = self.app.post('/client/{0}'.format(client.client_id),
data=dict(callback_url='http://tryme.com'))
- self.assertEquals(rv.status_code, 200)
client = Client.query.get('test_client')
self.assertEquals(client.callback_url, 'http://tryme.com')
|
78463a6ba34f1503f3c6fd5fdb287a0593f4be68
|
website/addons/figshare/__init__.py
|
website/addons/figshare/__init__.py
|
import os
from . import routes, views, model # noqa
MODELS = [
model.AddonFigShareUserSettings,
model.AddonFigShareNodeSettings,
model.FigShareGuidFile
]
USER_SETTINGS_MODEL = model.AddonFigShareUserSettings
NODE_SETTINGS_MODEL = model.AddonFigShareNodeSettings
ROUTES = [routes.settings_routes, routes.api_routes]
SHORT_NAME = 'figshare'
FULL_NAME = 'figshare'
OWNERS = ['user', 'node']
ADDED_DEFAULT = []
ADDED_MANDATORY = []
VIEWS = []
CONFIGS = ['user', 'node']
CATEGORIES = ['storage']
INCLUDE_JS = {}
INCLUDE_CSS = {}
HAS_HGRID_FILES = True
GET_HGRID_DATA = views.hgrid.figshare_hgrid_data
HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = None # use default nodes settings templates
USER_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 'figshare_user_settings.mako')
|
import os
from . import routes, views, model # noqa
MODELS = [
model.AddonFigShareUserSettings,
model.AddonFigShareNodeSettings,
model.FigShareGuidFile
]
USER_SETTINGS_MODEL = model.AddonFigShareUserSettings
NODE_SETTINGS_MODEL = model.AddonFigShareNodeSettings
ROUTES = [routes.settings_routes, routes.api_routes]
SHORT_NAME = 'figshare'
FULL_NAME = 'figshare'
OWNERS = ['user', 'node']
ADDED_DEFAULT = []
ADDED_MANDATORY = []
VIEWS = []
CONFIGS = ['user', 'node']
CATEGORIES = ['storage']
INCLUDE_JS = {}
INCLUDE_CSS = {}
HAS_HGRID_FILES = True
GET_HGRID_DATA = views.hgrid.figshare_hgrid_data
MAX_FILE_SIZE = 50
HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = None # use default nodes settings templates
USER_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 'figshare_user_settings.mako')
|
Set figshare's MAX_FILE_SIZE to 50mb
|
Set figshare's MAX_FILE_SIZE to 50mb
|
Python
|
apache-2.0
|
chennan47/osf.io,baylee-d/osf.io,HarryRybacki/osf.io,mluo613/osf.io,jolene-esposito/osf.io,binoculars/osf.io,petermalcolm/osf.io,caneruguz/osf.io,haoyuchen1992/osf.io,zamattiac/osf.io,arpitar/osf.io,amyshi188/osf.io,jnayak1/osf.io,mluke93/osf.io,RomanZWang/osf.io,cldershem/osf.io,abought/osf.io,doublebits/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,Ghalko/osf.io,wearpants/osf.io,MerlinZhang/osf.io,acshi/osf.io,jolene-esposito/osf.io,wearpants/osf.io,chrisseto/osf.io,danielneis/osf.io,cosenal/osf.io,baylee-d/osf.io,sloria/osf.io,GageGaskins/osf.io,Johnetordoff/osf.io,felliott/osf.io,sbt9uc/osf.io,mluo613/osf.io,njantrania/osf.io,cwisecarver/osf.io,ticklemepierce/osf.io,aaxelb/osf.io,arpitar/osf.io,samchrisinger/osf.io,KAsante95/osf.io,amyshi188/osf.io,wearpants/osf.io,TomBaxter/osf.io,danielneis/osf.io,amyshi188/osf.io,dplorimer/osf,kch8qx/osf.io,njantrania/osf.io,brandonPurvis/osf.io,GageGaskins/osf.io,mfraezz/osf.io,DanielSBrown/osf.io,lyndsysimon/osf.io,acshi/osf.io,jmcarp/osf.io,brianjgeiger/osf.io,ZobairAlijan/osf.io,cwisecarver/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,cosenal/osf.io,cldershem/osf.io,saradbowman/osf.io,laurenrevere/osf.io,bdyetton/prettychart,RomanZWang/osf.io,SSJohns/osf.io,HarryRybacki/osf.io,haoyuchen1992/osf.io,brianjgeiger/osf.io,Ghalko/osf.io,ticklemepierce/osf.io,ticklemepierce/osf.io,SSJohns/osf.io,alexschiller/osf.io,binoculars/osf.io,cwisecarver/osf.io,brianjgeiger/osf.io,caseyrygt/osf.io,hmoco/osf.io,acshi/osf.io,lyndsysimon/osf.io,aaxelb/osf.io,dplorimer/osf,jnayak1/osf.io,KAsante95/osf.io,baylee-d/osf.io,abought/osf.io,doublebits/osf.io,danielneis/osf.io,kch8qx/osf.io,samchrisinger/osf.io,mluo613/osf.io,reinaH/osf.io,zamattiac/osf.io,HalcyonChimera/osf.io,cldershem/osf.io,alexschiller/osf.io,billyhunt/osf.io,MerlinZhang/osf.io,ticklemepierce/osf.io,kwierman/osf.io,erinspace/osf.io,ckc6cz/osf.io,cslzchen/osf.io,jmcarp/osf.io,abought/osf.io,crcresearch/osf.io,doublebits/osf.io,TomHeatwole/osf.io,dplorimer/osf,reinaH/osf.io,Nesiehr/osf.io,mfraezz/osf.io,RomanZWang/osf.io,petermalcolm/osf.io,billyhunt/osf.io,crcresearch/osf.io,RomanZWang/osf.io,sloria/osf.io,asanfilippo7/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,TomBaxter/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,caneruguz/osf.io,samanehsan/osf.io,kwierman/osf.io,abought/osf.io,icereval/osf.io,chrisseto/osf.io,cslzchen/osf.io,danielneis/osf.io,petermalcolm/osf.io,GageGaskins/osf.io,aaxelb/osf.io,mluke93/osf.io,cwisecarver/osf.io,mfraezz/osf.io,GageGaskins/osf.io,emetsger/osf.io,asanfilippo7/osf.io,leb2dg/osf.io,alexschiller/osf.io,jnayak1/osf.io,HarryRybacki/osf.io,asanfilippo7/osf.io,zachjanicki/osf.io,jmcarp/osf.io,DanielSBrown/osf.io,arpitar/osf.io,billyhunt/osf.io,haoyuchen1992/osf.io,Nesiehr/osf.io,zachjanicki/osf.io,zamattiac/osf.io,Nesiehr/osf.io,pattisdr/osf.io,caseyrygt/osf.io,TomHeatwole/osf.io,lyndsysimon/osf.io,jolene-esposito/osf.io,kch8qx/osf.io,wearpants/osf.io,laurenrevere/osf.io,doublebits/osf.io,brandonPurvis/osf.io,KAsante95/osf.io,DanielSBrown/osf.io,petermalcolm/osf.io,samanehsan/osf.io,kwierman/osf.io,samanehsan/osf.io,TomHeatwole/osf.io,ZobairAlijan/osf.io,mfraezz/osf.io,SSJohns/osf.io,hmoco/osf.io,amyshi188/osf.io,KAsante95/osf.io,Ghalko/osf.io,caseyrollins/osf.io,DanielSBrown/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,RomanZWang/osf.io,KAsante95/osf.io,ckc6cz/osf.io,pattisdr/osf.io,leb2dg/osf.io,ckc6cz/osf.io,caseyrollins/osf.io,mattclark/osf.io,MerlinZhang/osf.io,chrisseto/osf.io,MerlinZhang/osf.io,caneruguz/osf.io,felliott/osf.io,zachjanic
ki/osf.io,acshi/osf.io,leb2dg/osf.io,arpitar/osf.io,bdyetton/prettychart,crcresearch/osf.io,cldershem/osf.io,laurenrevere/osf.io,rdhyee/osf.io,chrisseto/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,emetsger/osf.io,dplorimer/osf,Johnetordoff/osf.io,bdyetton/prettychart,cosenal/osf.io,lyndsysimon/osf.io,emetsger/osf.io,alexschiller/osf.io,erinspace/osf.io,samanehsan/osf.io,mluke93/osf.io,ZobairAlijan/osf.io,adlius/osf.io,adlius/osf.io,HarryRybacki/osf.io,jolene-esposito/osf.io,monikagrabowska/osf.io,mattclark/osf.io,leb2dg/osf.io,alexschiller/osf.io,mluke93/osf.io,chennan47/osf.io,haoyuchen1992/osf.io,felliott/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,caneruguz/osf.io,kch8qx/osf.io,zamattiac/osf.io,kwierman/osf.io,Ghalko/osf.io,sloria/osf.io,felliott/osf.io,SSJohns/osf.io,sbt9uc/osf.io,ckc6cz/osf.io,TomBaxter/osf.io,adlius/osf.io,brandonPurvis/osf.io,sbt9uc/osf.io,reinaH/osf.io,doublebits/osf.io,reinaH/osf.io,emetsger/osf.io,njantrania/osf.io,caseyrygt/osf.io,monikagrabowska/osf.io,billyhunt/osf.io,cslzchen/osf.io,caseyrollins/osf.io,jmcarp/osf.io,chennan47/osf.io,asanfilippo7/osf.io,binoculars/osf.io,billyhunt/osf.io,HalcyonChimera/osf.io,rdhyee/osf.io,jnayak1/osf.io,adlius/osf.io,GageGaskins/osf.io,saradbowman/osf.io,samchrisinger/osf.io,kch8qx/osf.io,cosenal/osf.io,icereval/osf.io,acshi/osf.io,ZobairAlijan/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,brandonPurvis/osf.io,caseyrygt/osf.io,mluo613/osf.io,rdhyee/osf.io,samchrisinger/osf.io,bdyetton/prettychart,pattisdr/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,hmoco/osf.io,Johnetordoff/osf.io,brandonPurvis/osf.io,njantrania/osf.io,sbt9uc/osf.io,Nesiehr/osf.io
|
---
+++
@@ -31,6 +31,8 @@
HAS_HGRID_FILES = True
GET_HGRID_DATA = views.hgrid.figshare_hgrid_data
+MAX_FILE_SIZE = 50
+
HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = None # use default nodes settings templates
USER_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 'figshare_user_settings.mako')
|
7ed3a8452de8d75a09d2ee2265d7fa32b4a25c7c
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'Joao Moreira'
SITENAME = 'Joao Moreira'
SITEURL = ''
BIO = 'lorem ipsum doler umpalum paluuu'
PROFILE_IMAGE = "avatar.jpg"
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = 'en'
DEFAULT_DATE_FORMAT = '%B %-d, %Y'
THEME = "pelican-hyde"
DISPLAY_PAGES_ON_MENU = True
LOAD_CONTENT_CACHE = False
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Social widget
SOCIAL = (('github-square', 'https://github.com/jagmoreira'),
('linkedin', 'https://www.linkedin.com/in/joao-moreira'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
YEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'Joao Moreira'
SITENAME = 'Joao Moreira'
SITEURL = ''
BIO = 'PhD student. Data scientist. Iron Man fan.'
PROFILE_IMAGE = "avatar.jpg"
PATH = 'content'
STATIC_PATHS = ['images', 'extra/CNAME']
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},}
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = 'en'
DEFAULT_DATE_FORMAT = '%B %-d, %Y'
THEME = "pelican-hyde"
DISPLAY_PAGES_ON_MENU = True
LOAD_CONTENT_CACHE = False
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Social widget
SOCIAL = (('github-square', 'https://github.com/jagmoreira'),
('linkedin', 'https://www.linkedin.com/in/joao-moreira'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
YEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'
|
Add publication date on github publish
|
Add publication date on github publish
|
Python
|
mit
|
jagmoreira/jagmoreira.github.io,jagmoreira/jagmoreira.github.io
|
---
+++
@@ -5,10 +5,12 @@
AUTHOR = 'Joao Moreira'
SITENAME = 'Joao Moreira'
SITEURL = ''
-BIO = 'lorem ipsum doler umpalum paluuu'
+BIO = 'PhD student. Data scientist. Iron Man fan.'
PROFILE_IMAGE = "avatar.jpg"
PATH = 'content'
+STATIC_PATHS = ['images', 'extra/CNAME']
+EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},}
TIMEZONE = 'America/Chicago'
|
92aecd24a28f92d05bdb123d98b19d45fc749427
|
sparts/tasks/periodic.py
|
sparts/tasks/periodic.py
|
from ..vtask import VTask
import time
from ..sparts import option
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
interval = option('interval', type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
while not self.service._stop:
t0 = time.time()
self.execute()
to_sleep = time.time() - (t0 + self.interval)
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
def execute(self, context=None):
self.logger.debug('execute')
|
from ..vtask import VTask
import time
from ..sparts import option
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
interval = option('interval', type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
while not self.service._stop:
t0 = time.time()
self.execute()
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
#self.incrementCounter('n_slow_intervals')
pass
def execute(self, context=None):
self.logger.debug('execute')
|
Fix PeriodicTask interval sleep calculation
|
Fix PeriodicTask interval sleep calculation
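Worked numbers (invented) show why the operands had to swap: with t0 = 100.0, interval = 5.0 and time.time() returning 101.5 after execute(),

t0, interval, now = 100.0, 5.0, 101.5
old_sleep = now - (t0 + interval)  # -3.5: never positive, so the loop never waited
new_sleep = (t0 + interval) - now  #  3.5: the time left in the interval
assert old_sleep == -3.5 and new_sleep == 3.5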
|
Python
|
bsd-3-clause
|
fmoo/sparts,bboozzoo/sparts,facebook/sparts,pshuff/sparts,facebook/sparts,pshuff/sparts,djipko/sparts,djipko/sparts,fmoo/sparts,bboozzoo/sparts
|
---
+++
@@ -24,10 +24,13 @@
while not self.service._stop:
t0 = time.time()
self.execute()
- to_sleep = time.time() - (t0 + self.interval)
+ to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
+ else:
+ #self.incrementCounter('n_slow_intervals')
+ pass
def execute(self, context=None):
self.logger.debug('execute')
|
a4135626721efada6a68dab6cb86ce2dfb687462
|
factory/tools/cat_StarterLog.py
|
factory/tools/cat_StarterLog.py
|
#!/bin/env python
#
# cat_StarterLog.py
#
# Print out the StarterLog for a glidein output file
#
# Usage: cat_StarterLog.py logname
#
import os.path
import sys
STARTUP_DIR=sys.path[0]
sys.path.append(os.path.join(STARTUP_DIR,"lib"))
import gWftLogParser
USAGE="Usage: cat_StarterLog.py <logname>"
def main():
try:
print gWftLogParser.get_CondorLog(sys.argv[1],"StarterLog.vm2")
except:
sys.stderr.write("%s\n"%USAGE)
sys.exit(1)
if __name__ == '__main__':
main()
|
#!/bin/env python
#
# cat_StarterLog.py
#
# Print out the StarterLog for a glidein output file
#
# Usage: cat_StarterLog.py logname
#
import os.path
import sys
STARTUP_DIR=sys.path[0]
sys.path.append(os.path.join(STARTUP_DIR,"lib"))
import gWftLogParser
USAGE="Usage: cat_StarterLog.py <logname>"
def main():
try:
print gWftLogParser.get_CondorLog(sys.argv[1],"((StarterLog)|(StarterLog.vm2))")
except:
sys.stderr.write("%s\n"%USAGE)
sys.exit(1)
if __name__ == '__main__':
main()
|
Support both old and new format
|
Support both old and new format
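A quick check, outside the tool itself, that the alternation accepts both log names:

import re
pattern = "((StarterLog)|(StarterLog.vm2))"
assert re.match(pattern, "StarterLog")
assert re.match(pattern, "StarterLog.vm2")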
|
Python
|
bsd-3-clause
|
bbockelm/glideinWMS,holzman/glideinwms-old,bbockelm/glideinWMS,holzman/glideinwms-old,bbockelm/glideinWMS,bbockelm/glideinWMS,holzman/glideinwms-old
|
---
+++
@@ -17,7 +17,7 @@
def main():
try:
- print gWftLogParser.get_CondorLog(sys.argv[1],"StarterLog.vm2")
+ print gWftLogParser.get_CondorLog(sys.argv[1],"((StarterLog)|(StarterLog.vm2))")
except:
sys.stderr.write("%s\n"%USAGE)
sys.exit(1)
|
9334d20adb15f3a6be393c57c797311e31fcd8fc
|
ConectorDriverComando.py
|
ConectorDriverComando.py
|
# -*- coding: iso-8859-1 -*-
from serial import SerialException
import importlib
import threading
import logging
class ConectorError(Exception):
pass
class ConectorDriverComando:
driver = None
def __init__(self, comando, driver, *args, **kwargs):
logging.getLogger().info("inicializando ConectorDriverComando driver de %s" % driver)
self._comando = comando
self.driver_name = driver
        # instantiate the driver dynamically, according to the driver passed as a parameter
libraryName = "Drivers." + driver + "Driver"
driverModule = importlib.import_module(libraryName)
driverClass = getattr(driverModule, driver + "Driver")
self.driver = driverClass(**kwargs)
def sendCommand(self, *args):
logging.getLogger().info("Enviando comando %s" % args)
return self.driver.sendCommand(*args)
def close(self):
        # If the driver is Receipt, it is closed from the driver's own class; otherwise it throws a Bad File Descriptor error from being closed twice.
if self.driver_name == "ReceiptDirectJet":
if self.driver.connected is False:
return None
self.driver.close()
self.driver = None
|
# -*- coding: iso-8859-1 -*-
from serial import SerialException
import importlib
import threading
import logging
class ConectorError(Exception):
pass
class ConectorDriverComando:
driver = None
def __init__(self, comando, driver, *args, **kwargs):
# logging.getLogger().info("inicializando ConectorDriverComando driver de %s" % driver)
logging.getLogger().info("inicializando ConectorDriverComando driver de '${0}'".format(driver))
self._comando = comando
self.driver_name = driver
        # instantiate the driver dynamically, according to the driver passed as a parameter
libraryName = "Drivers." + driver + "Driver"
driverModule = importlib.import_module(libraryName)
driverClass = getattr(driverModule, driver + "Driver")
self.driver = driverClass(**kwargs)
def sendCommand(self, *args):
# logging.getLogger().info("Enviando comando %s" % args)
logging.getLogger().info("Enviando comando '${0}'".format(args))
return self.driver.sendCommand(*args)
def close(self):
        # If the driver is Receipt, it is closed from the driver's own class; otherwise it throws a Bad File Descriptor error from being closed twice.
if self.driver_name == "ReceiptDirectJet":
if self.driver.connected is False:
return None
self.driver.close()
self.driver = None
|
FIX Format String Error in Conector Driver Comando
|
FIX Format String Error in Conector Driver Comando
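The two formatting styles side by side with a sample driver name; note that str.format leaves the literal '$' from the template in the output:

driver = "ReceiptDirectJet"
print("driver de %s" % driver)            # driver de ReceiptDirectJet
print("driver de '${0}'".format(driver))  # driver de '$ReceiptDirectJet'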
|
Python
|
mit
|
ristorantino/fiscalberry,ristorantino/fiscalberry,ristorantino/fiscalberry,ristorantino/fiscalberry
|
---
+++
@@ -13,7 +13,8 @@
driver = None
def __init__(self, comando, driver, *args, **kwargs):
- logging.getLogger().info("inicializando ConectorDriverComando driver de %s" % driver)
+ # logging.getLogger().info("inicializando ConectorDriverComando driver de %s" % driver)
+ logging.getLogger().info("inicializando ConectorDriverComando driver de '${0}'".format(driver))
self._comando = comando
self.driver_name = driver
@@ -26,7 +27,8 @@
self.driver = driverClass(**kwargs)
def sendCommand(self, *args):
- logging.getLogger().info("Enviando comando %s" % args)
+ # logging.getLogger().info("Enviando comando %s" % args)
+ logging.getLogger().info("Enviando comando '${0}'".format(args))
return self.driver.sendCommand(*args)
def close(self):
|
98dd8df628079357b26a663d24adcbc6ac4d3794
|
indra/__init__.py
|
indra/__init__.py
|
from __future__ import print_function, unicode_literals
import logging
__version__ = '1.3.0'
logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('rdflib').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
|
from __future__ import print_function, unicode_literals
import logging
__version__ = '1.3.0'
__all__ = ['bel', 'biopax', 'trips', 'reach', 'index_cards', 'sparser',
'databases', 'literature',
'preassembler', 'assemblers', 'mechlinker', 'belief',
'tools', 'util']
'''
#############
# For now these imports are disabled because
# (1) Every import would load everything in INDRA which is time consuming and
# (2) Optional dependencies in some modules will try to be loaded even if
# they are not intended to be used
##################
# Core
import statements
# Input processors
from indra import bel
from indra import biopax
from indra import trips
from indra import reach
from indra import index_cards
# Clients
from indra import databases
from indra import literature
# Assemblers
from indra import preassembler
from indra import assemblers
from indra import mechlinker
from indra import belief
# Tools and utils
from indra import tools
from indra import util
'''
logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('rdflib').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
|
Add commented out top-level imports
|
Add commented out top-level imports
|
Python
|
bsd-2-clause
|
pvtodorov/indra,sorgerlab/belpy,jmuhlich/indra,johnbachman/belpy,jmuhlich/indra,sorgerlab/indra,pvtodorov/indra,bgyori/indra,johnbachman/indra,jmuhlich/indra,sorgerlab/belpy,sorgerlab/indra,pvtodorov/indra,bgyori/indra,bgyori/indra,sorgerlab/indra,johnbachman/indra,pvtodorov/indra,johnbachman/belpy,johnbachman/belpy,sorgerlab/belpy,johnbachman/indra
|
---
+++
@@ -1,6 +1,38 @@
from __future__ import print_function, unicode_literals
import logging
__version__ = '1.3.0'
+
+__all__ = ['bel', 'biopax', 'trips', 'reach', 'index_cards', 'sparser',
+ 'databases', 'literature',
+ 'preassembler', 'assemblers', 'mechlinker', 'belief',
+ 'tools', 'util']
+'''
+#############
+# For now these imports are disabled because
+# (1) Every import would load everything in INDRA which is time consuming and
+# (2) Optional dependencies in some modules will try to be loaded even if
+# they are not intended to be used
+##################
+# Core
+import statements
+# Input processors
+from indra import bel
+from indra import biopax
+from indra import trips
+from indra import reach
+from indra import index_cards
+# Clients
+from indra import databases
+from indra import literature
+# Assemblers
+from indra import preassembler
+from indra import assemblers
+from indra import mechlinker
+from indra import belief
+# Tools and utils
+from indra import tools
+from indra import util
+'''
logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
level=logging.INFO)
|
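The commented-out block documents why eager subpackage imports were avoided. On Python 3.7+, a module-level __getattr__ (PEP 562) offers a middle ground, loading subpackages only on first access; a sketch of what indra/__init__.py could do instead, with the __all__ list abbreviated for illustration:

import importlib

__all__ = ['bel', 'biopax', 'trips']  # abbreviated

def __getattr__(name):
    # resolve e.g. indra.bel lazily, keeping plain "import indra" cheap
    if name in __all__:
        return importlib.import_module('.' + name, __name__)
    raise AttributeError(name)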
3e8f45368b949cbd140a2a61fcba7afec563a7a1
|
website/views.py
|
website/views.py
|
import logging
logger = logging.getLogger(__name__)
from django.views.generic import TemplateView
from voting.models import Bill
from voting.models import Member
class HomeView(TemplateView):
template_name = "website/index.html"
context_object_name = "homepage"
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
return context
class BillsView(TemplateView):
template_name = "website/bills.html"
context_object_name = "bills"
def get_context_data(self, **kwargs):
context = super(BillsView, self).get_context_data(**kwargs)
bills = Bill.objects.all()
for bill in bills:
bill.votes = bill.get_votes()
context['bills'] = bills
return context
class MembersView(TemplateView):
template_name = "website/members.html"
context_object_name = "members"
def get_context_data(self, **kwargs):
context = super(MembersView, self).get_context_data(**kwargs)
members = Member.objects.all()
context['members'] = members
return context
|
import logging
logger = logging.getLogger(__name__)
from django.views.generic import TemplateView
from voting.models import Bill
from voting.models import Member
class HomeView(TemplateView):
template_name = "website/index.html"
context_object_name = "homepage"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class BillsView(TemplateView):
template_name = "website/bills.html"
context_object_name = "bills"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
bills = Bill.objects.all()
for bill in bills:
bill.votes = bill.get_votes()
context['bills'] = bills
return context
class MembersView(TemplateView):
template_name = "website/members.html"
context_object_name = "members"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
members = Member.objects.all()
context['members'] = members
return context
|
Use modern super() python class function
|
Use modern super() python class function
|
Python
|
mit
|
openkamer/openkamer,openkamer/openkamer,openkamer/openkamer,openkamer/openkamer
|
---
+++
@@ -13,7 +13,7 @@
context_object_name = "homepage"
def get_context_data(self, **kwargs):
- context = super(HomeView, self).get_context_data(**kwargs)
+ context = super().get_context_data(**kwargs)
return context
@@ -22,8 +22,7 @@
context_object_name = "bills"
def get_context_data(self, **kwargs):
- context = super(BillsView, self).get_context_data(**kwargs)
-
+ context = super().get_context_data(**kwargs)
bills = Bill.objects.all()
for bill in bills:
bill.votes = bill.get_votes()
@@ -36,8 +35,7 @@
context_object_name = "members"
def get_context_data(self, **kwargs):
- context = super(MembersView, self).get_context_data(**kwargs)
-
+ context = super().get_context_data(**kwargs)
members = Member.objects.all()
context['members'] = members
return context
|
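For reference, the zero-argument form resolves the class and instance through the compiler-provided __class__ cell, so call sites survive class renames; a self-contained sketch:

class BaseView:
    def get_context_data(self, **kwargs):
        return {'base': True, **kwargs}

class MembersView(BaseView):
    def get_context_data(self, **kwargs):
        # no need to repeat the class name as in super(MembersView, self)
        context = super().get_context_data(**kwargs)
        context['members'] = ['alice', 'bob']  # illustrative payload
        return context

print(MembersView().get_context_data(page=1))
# {'base': True, 'page': 1, 'members': ['alice', 'bob']}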
79c8ab721fd5d00bff3e96b52e6155e16ae255b2
|
skan/test/test_pipe.py
|
skan/test/test_pipe.py
|
import os
import pytest
import tempfile
import pandas
from skan import pipe
@pytest.fixture
def image_filename():
rundir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(rundir, 'data')
return os.path.join(datadir, 'retic.tif')
def test_pipe(image_filename):
data = pipe.process_images([image_filename], 'fei', 5e-8, 0.1, 0.075,
'Scan/PixelHeight')
assert type(data[0]) == pandas.DataFrame
assert data[0].shape[0] > 0
def test_pipe_figure(image_filename):
with tempfile.TemporaryDirectory() as tempdir:
data = pipe.process_images([image_filename], 'fei', 5e-8, 0.1, 0.075,
'Scan/PixelHeight',
save_skeleton='skeleton-plot-',
output_folder=tempdir)
expected_output = os.path.join(tempdir, 'skeleton-plot-' +
os.path.basename(image_filename)[:-4] +
'.png')
assert os.path.exists(expected_output)
assert type(data[0]) == pandas.DataFrame
assert data[0].shape[0] > 0
|
import os
import pytest
import tempfile
import pandas
from skan import pipe
@pytest.fixture
def image_filename():
rundir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(rundir, 'data')
return os.path.join(datadir, 'retic.tif')
def test_pipe(image_filename):
data = pipe.process_images([image_filename], 'fei', 5e-8, 0.1, 0.075,
'Scan/PixelHeight')
assert type(data[0]) == pandas.DataFrame
assert data[0].shape[0] > 0
data2 = pipe.process_images([image_filename], 'fei', 5e-8, 0.1, 0.075,
'Scan/PixelHeight', crop_radius=75)[0]
assert data2.shape[0] < data[0].shape[0]
def test_pipe_figure(image_filename):
with tempfile.TemporaryDirectory() as tempdir:
data = pipe.process_images([image_filename], 'fei', 5e-8, 0.1, 0.075,
'Scan/PixelHeight',
save_skeleton='skeleton-plot-',
output_folder=tempdir)
expected_output = os.path.join(tempdir, 'skeleton-plot-' +
os.path.basename(image_filename)[:-4] +
'.png')
assert os.path.exists(expected_output)
assert type(data[0]) == pandas.DataFrame
assert data[0].shape[0] > 0
|
Add small test for crop parameter to pipe
|
Add small test for crop parameter to pipe
|
Python
|
bsd-3-clause
|
jni/skan
|
---
+++
@@ -18,6 +18,10 @@
assert type(data[0]) == pandas.DataFrame
assert data[0].shape[0] > 0
+ data2 = pipe.process_images([image_filename], 'fei', 5e-8, 0.1, 0.075,
+ 'Scan/PixelHeight', crop_radius=75)[0]
+ assert data2.shape[0] < data[0].shape[0]
+
def test_pipe_figure(image_filename):
with tempfile.TemporaryDirectory() as tempdir:
|
cd138281cbe38ad32507658524a939561aaf77e6
|
pgmapcss/version.py
|
pgmapcss/version.py
|
__all__ = 'VERSION', 'VERSION_INFO'
#: (:class:`tuple`) The version tuple e.g. ``(0, 9, 2)``.
VERSION_INFO = (0, 8, 0)
#: (:class:`basestring`) The version string e.g. ``'0.9.2'``.
if len(VERSION_INFO) == 4:
VERSION = '%d.%d.%d-%s' % VERSION_INFO
elif type(VERSION_INFO[2]) == str:
VERSION = '%d.%d-%s' % VERSION_INFO
else:
VERSION = '%d.%d.%d' % VERSION_INFO
|
__all__ = 'VERSION', 'VERSION_INFO'
#: (:class:`tuple`) The version tuple e.g. ``(0, 9, 2)``.
VERSION_INFO = (0, 9, 'dev')
#: (:class:`basestring`) The version string e.g. ``'0.9.2'``.
if len(VERSION_INFO) == 4:
VERSION = '%d.%d.%d-%s' % VERSION_INFO
elif type(VERSION_INFO[2]) == str:
VERSION = '%d.%d-%s' % VERSION_INFO
else:
VERSION = '%d.%d.%d' % VERSION_INFO
|
Create new v0.9 development branch
|
Create new v0.9 development branch
|
Python
|
agpl-3.0
|
plepe/pgmapcss,plepe/pgmapcss
|
---
+++
@@ -1,7 +1,7 @@
__all__ = 'VERSION', 'VERSION_INFO'
#: (:class:`tuple`) The version tuple e.g. ``(0, 9, 2)``.
-VERSION_INFO = (0, 8, 0)
+VERSION_INFO = (0, 9, 'dev')
#: (:class:`basestring`) The version string e.g. ``'0.9.2'``.
if len(VERSION_INFO) == 4:
|
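Tracing the branches above with the new value: (0, 9, 'dev') has length 3 and a string third element, so the '%d.%d-%s' branch fires. A self-contained sketch of the same logic:

def format_version(version_info):
    if len(version_info) == 4:
        return '%d.%d.%d-%s' % version_info
    elif type(version_info[2]) == str:
        return '%d.%d-%s' % version_info
    else:
        return '%d.%d.%d' % version_info

print(format_version((0, 9, 'dev')))     # '0.9-dev'  (new development branch)
print(format_version((0, 8, 0)))         # '0.8.0'    (previous release)
print(format_version((0, 9, 2, 'rc1')))  # '0.9.2-rc1'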
64cb1130811c5e0e1d547ff7a3a03139b831dea5
|
openacademy/model/openacademy_session.py
|
openacademy/model/openacademy_session.py
|
# -*- coding: utf-8 -*_
from openerp import fields, models
class Session(models.Model):
_name = 'openacademy.session'
name = fields.Char(required=True)
start_date = fields.Date()
duration = fields.Float(digits=(6, 2), help="Duration in days")
seats = fields.Integer(string="Number of seats")
instructor_id = fields.Many2one('res.partner', string="Instructor")
course_id = fields.Many2one('openacademy.course',
ondelete='cascade', string="Course", required=True)
attendee_ids = fields.Many2many('res.partner', string="Attendees")
|
# -*- coding: utf-8 -*_
from openerp import fields, models
class Session(models.Model):
_name = 'openacademy.session'
name = fields.Char(required=True)
start_date = fields.Date()
duration = fields.Float(digits=(6, 2), help="Duration in days")
seats = fields.Integer(string="Number of seats")
instructor_id = fields.Many2one('res.partner', string="Instructor",
domain=['|',
("instructor", "=", True),
("category_id", "ilike", "Teacher"),
])
course_id = fields.Many2one('openacademy.course',
ondelete='cascade', string="Course", required=True)
attendee_ids = fields.Many2many('res.partner', string="Attendees")
|
Add domain with OR and ilike
|
[REF] openacademy: Add domain with OR and ilike
|
Python
|
apache-2.0
|
glizek/openacademy-project
|
---
+++
@@ -9,7 +9,11 @@
start_date = fields.Date()
duration = fields.Float(digits=(6, 2), help="Duration in days")
seats = fields.Integer(string="Number of seats")
- instructor_id = fields.Many2one('res.partner', string="Instructor")
+ instructor_id = fields.Many2one('res.partner', string="Instructor",
+ domain=['|',
+ ("instructor", "=", True),
+ ("category_id", "ilike", "Teacher"),
+ ])
course_id = fields.Many2one('openacademy.course',
ondelete='cascade', string="Course", required=True)
attendee_ids = fields.Many2many('res.partner', string="Attendees")
|
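Odoo domains use prefix (Polish) notation: the '|' operator applies to the two terms that follow it, and 'ilike' is a case-insensitive pattern match. A sketch of how the domain above reads (the SQL is only an approximation):

domain = [
    '|',                                  # OR of the next two leaves
    ('instructor', '=', True),            # partner flagged as instructor
    ('category_id', 'ilike', 'Teacher'),  # or tagged with a 'Teacher'-like category
]
# roughly: instructor = TRUE OR category ILIKE '%Teacher%'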
6b1ca442624ed1bc61bd816452af62033f975232
|
categories/forms.py
|
categories/forms.py
|
# This file is part of e-Giełda.
# Copyright (C) 2014 Mateusz Maćkowski and Tomasz Zieliński
#
# e-Giełda is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with e-Giełda. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.utils.translation import ugettext_lazy as _
from categories.models import Category
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
labels = {
'name': _("Name")
}
|
# This file is part of e-Giełda.
# Copyright (C) 2014 Mateusz Maćkowski and Tomasz Zieliński
#
# e-Giełda is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with e-Giełda. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.utils.translation import ugettext_lazy as _
from categories.models import Category
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
labels = {
'name': _("Name")
}
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
}
|
Add required attribute to Category name input
|
Add required attribute to Category name input
|
Python
|
agpl-3.0
|
m4tx/egielda,m4tx/egielda,m4tx/egielda
|
---
+++
@@ -22,3 +22,6 @@
labels = {
'name': _("Name")
}
+ widgets = {
+ 'name': forms.TextInput(attrs={'required': 'required'}),
+ }
|
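The attrs dict is rendered verbatim onto the input tag, so the browser enforces the field before the form is ever submitted; note that Django 1.10+ adds the required attribute automatically for required fields. A minimal standalone sketch, independent of the project's models:

from django import forms

class CategoryNameForm(forms.Form):  # stand-in for the ModelForm above
    name = forms.CharField(
        widget=forms.TextInput(attrs={'required': 'required'})
    )

# str(CategoryNameForm()['name']) renders something like:
# <input type="text" name="name" required="required" ... >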
17905bc0f7f21331476d27c6eb302408b4382e4b
|
coffeeoutsidebot.py
|
coffeeoutsidebot.py
|
#!/usr/bin/env python
# CoffeeOutsideBot
# Copyright 2016, David Crosby
# BSD 2-clause license
#
# TODO - add rest of the locations, etc
# TODO - automate weather forecast lookup
# TODO - automate Cyclepalooza event creation
# TODO - clean this ugly thing up
import json
import random
from twitter import *
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read('./cb_config.ini')
tcreds = {}
# yuck
for section in ['twitter']:
if parser.has_section(section):
if section == 'twitter':
for option in parser.options('twitter'):
tcreds[option] = parser.get('twitter', option)
print(tcreds)
locations = []
try:
with open('./winter_locations', 'r') as file_handle:
for l in file_handle:
if len(l.strip()) > 0:
locations.append(l.strip())
except IOError, err:
print(err)
print(locations)
location = random.choice(locations)
new_status = "The next #CoffeeOutside is at " + location
print(new_status)
# The Twitter Bits
t = Twitter(
auth=OAuth(tcreds['token'], tcreds['token_secret'], tcreds['consumer_key'], tcreds['consumer_secret']))
t.statuses.update(status=new_status)
|
#!/usr/bin/env python
# CoffeeOutsideBot
# Copyright 2016, David Crosby
# BSD 2-clause license
#
# TODO - add rest of the locations, etc
# TODO - automate weather forecast lookup
# TODO - automate Cyclepalooza event creation
# TODO - clean this ugly thing up
import json
import random
from twitter import *
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read('./cb_config.ini')
tcreds = {}
# yuck
for section in ['twitter']:
if parser.has_section(section):
if section == 'twitter':
for option in parser.options('twitter'):
tcreds[option] = parser.get('twitter', option)
print(tcreds)
locations = []
try:
with open('./winter_locations', 'r') as file_handle:
for l in file_handle:
if len(l.strip()) > 0:
locations.append(l.strip())
except IOError, err:
print(err)
location = random.choice(locations)
new_status = "This week's #CoffeeOutside is at " + location + ", see you there!"
print(new_status)
# The Twitter Bits
t = Twitter(
auth=OAuth(tcreds['token'], tcreds['token_secret'], tcreds['consumer_key'], tcreds['consumer_secret']))
t.statuses.update(status=new_status)
|
Make the tweet slightly less robot-y
|
Make the tweet slightly less robot-y
|
Python
|
bsd-2-clause
|
dafyddcrosby/coffeeoutsidebot,yycbike/coffeeoutsidebot,yycbike/coffeeoutsidebot
|
---
+++
@@ -36,10 +36,8 @@
except IOError, err:
print(err)
-print(locations)
-
location = random.choice(locations)
-new_status = "The next #CoffeeOutside is at " + location
+new_status = "This week's #CoffeeOutside is at " + location + ", see you there!"
print(new_status)
# The Twitter Bits
|
c651d511c5c730f1a0ffdcd1a19e15443fda5e9f
|
tests/test_emit_movie_queue.py
|
tests/test_emit_movie_queue.py
|
from __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
from flexget.manager import Session
from flexget.plugins.filter.movie_queue import queue_add, QueuedMovie
from tests import FlexGetBase
def age_last_emit(**kwargs):
session = Session()
for item in session.query(QueuedMovie).all():
item.last_emit = datetime.utcnow() - timedelta(**kwargs)
session.commit()
class TestEmitMovieQueue(FlexGetBase):
__yaml__ = """
tasks:
test_default:
emit_movie_queue: yes
"""
def test_default(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093', tmdb_id=603)
self.execute_task('test_default')
assert len(self.task.entries) == 1
# Movie ids should be provided on the entry without needing lookups
entry = self.task.entries[0]
assert entry.get('imdb_id', eval_lazy=False) == 'tt0133093'
assert entry.get('tmdb_id', eval_lazy=False) == 603
self.execute_task('test_default')
assert len(self.task.entries) == 1, 'Movie should be emitted every run'
|
from __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
from nose.plugins.attrib import attr
from flexget.manager import Session
from flexget.plugins.filter.movie_queue import queue_add, QueuedMovie
from tests import FlexGetBase
def age_last_emit(**kwargs):
session = Session()
for item in session.query(QueuedMovie).all():
item.last_emit = datetime.utcnow() - timedelta(**kwargs)
session.commit()
class TestEmitMovieQueue(FlexGetBase):
__yaml__ = """
tasks:
test_default:
emit_movie_queue:
# TODO: Currently plugin calls tmdb lookup to get year, movie queue should probably store
year: no
"""
def test_default(self):
queue_add(title='The Matrix 1999', imdb_id='tt0133093', tmdb_id=603)
self.execute_task('test_default')
assert len(self.task.entries) == 1
# Movie ids should be provided on the entry without needing lookups
entry = self.task.entries[0]
assert entry.get('imdb_id', eval_lazy=False) == 'tt0133093'
assert entry.get('tmdb_id', eval_lazy=False) == 603
self.execute_task('test_default')
assert len(self.task.entries) == 1, 'Movie should be emitted every run'
|
Make sure emit_movie_queue test doesn't go online
|
Make sure emit_movie_queue test doesn't go online
|
Python
|
mit
|
drwyrm/Flexget,crawln45/Flexget,antivirtel/Flexget,ianstalk/Flexget,spencerjanssen/Flexget,qvazzler/Flexget,Pretagonist/Flexget,Pretagonist/Flexget,cvium/Flexget,tsnoam/Flexget,poulpito/Flexget,tarzasai/Flexget,camon/Flexget,thalamus/Flexget,v17al/Flexget,tsnoam/Flexget,lildadou/Flexget,Flexget/Flexget,ratoaq2/Flexget,tobinjt/Flexget,grrr2/Flexget,poulpito/Flexget,ibrahimkarahan/Flexget,ZefQ/Flexget,malkavi/Flexget,ZefQ/Flexget,voriux/Flexget,qvazzler/Flexget,ZefQ/Flexget,jacobmetrick/Flexget,JorisDeRieck/Flexget,antivirtel/Flexget,Pretagonist/Flexget,X-dark/Flexget,drwyrm/Flexget,sean797/Flexget,thalamus/Flexget,JorisDeRieck/Flexget,drwyrm/Flexget,ianstalk/Flexget,ratoaq2/Flexget,tvcsantos/Flexget,Danfocus/Flexget,tsnoam/Flexget,qk4l/Flexget,jawilson/Flexget,tarzasai/Flexget,ibrahimkarahan/Flexget,grrr2/Flexget,X-dark/Flexget,Danfocus/Flexget,crawln45/Flexget,sean797/Flexget,jawilson/Flexget,ianstalk/Flexget,Flexget/Flexget,grrr2/Flexget,crawln45/Flexget,v17al/Flexget,patsissons/Flexget,qk4l/Flexget,jacobmetrick/Flexget,tobinjt/Flexget,spencerjanssen/Flexget,v17al/Flexget,cvium/Flexget,xfouloux/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,X-dark/Flexget,malkavi/Flexget,oxc/Flexget,jacobmetrick/Flexget,offbyone/Flexget,Danfocus/Flexget,qk4l/Flexget,tarzasai/Flexget,Flexget/Flexget,tobinjt/Flexget,malkavi/Flexget,malkavi/Flexget,patsissons/Flexget,cvium/Flexget,vfrc2/Flexget,antivirtel/Flexget,vfrc2/Flexget,thalamus/Flexget,lildadou/Flexget,dsemi/Flexget,gazpachoking/Flexget,OmgOhnoes/Flexget,ratoaq2/Flexget,crawln45/Flexget,xfouloux/Flexget,dsemi/Flexget,jawilson/Flexget,patsissons/Flexget,qvazzler/Flexget,offbyone/Flexget,JorisDeRieck/Flexget,tvcsantos/Flexget,OmgOhnoes/Flexget,LynxyssCZ/Flexget,LynxyssCZ/Flexget,poulpito/Flexget,oxc/Flexget,ibrahimkarahan/Flexget,sean797/Flexget,JorisDeRieck/Flexget,xfouloux/Flexget,dsemi/Flexget,offbyone/Flexget,jawilson/Flexget,camon/Flexget,voriux/Flexget,vfrc2/Flexget,spencerjanssen/Flexget,gazpachoking/Flexget,lildadou/Flexget,tobinjt/Flexget,OmgOhnoes/Flexget,Danfocus/Flexget,oxc/Flexget,LynxyssCZ/Flexget
|
---
+++
@@ -1,5 +1,7 @@
from __future__ import unicode_literals, division, absolute_import
from datetime import timedelta, datetime
+
+from nose.plugins.attrib import attr
from flexget.manager import Session
from flexget.plugins.filter.movie_queue import queue_add, QueuedMovie
@@ -17,7 +19,9 @@
__yaml__ = """
tasks:
test_default:
- emit_movie_queue: yes
+ emit_movie_queue:
+ # TODO: Currently plugin calls tmdb lookup to get year, movie queue should probably store
+ year: no
"""
def test_default(self):
|
c99bf0a57a2e257259890df72e948d6030288aaf
|
couchdb/tests/testutil.py
|
couchdb/tests/testutil.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import uuid
from couchdb import client
class TempDatabaseMixin(object):
temp_dbs = None
_db = None
def setUp(self):
self.server = client.Server(full_commit=False)
def tearDown(self):
if self.temp_dbs:
for name in self.temp_dbs:
self.server.delete(name)
def temp_db(self):
if self.temp_dbs is None:
self.temp_dbs = {}
name = 'couchdb-python/' + uuid.uuid4().hex
db = self.server.create(name)
self.temp_dbs[name] = db
return name, db
def del_db(self, name):
del self.temp_dbs[name]
self.server.delete(name)
@property
def db(self):
if self._db is None:
name, self._db = self.temp_db()
return self._db
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import random
import sys
from couchdb import client
class TempDatabaseMixin(object):
temp_dbs = None
_db = None
def setUp(self):
self.server = client.Server(full_commit=False)
def tearDown(self):
if self.temp_dbs:
for name in self.temp_dbs:
self.server.delete(name)
def temp_db(self):
if self.temp_dbs is None:
self.temp_dbs = {}
# Find an unused database name
while True:
name = 'couchdb-python/%d' % random.randint(0, sys.maxint)
if name not in self.temp_dbs:
break
print '%s already used' % name
db = self.server.create(name)
self.temp_dbs[name] = db
return name, db
def del_db(self, name):
del self.temp_dbs[name]
self.server.delete(name)
@property
def db(self):
if self._db is None:
name, self._db = self.temp_db()
return self._db
|
Use a random number instead of uuid for temp database name.
|
Use a random number instead of uuid for temp database name.
|
Python
|
bsd-3-clause
|
erikdejonge/rabshakeh-couchdb-python-progress-attachments
|
---
+++
@@ -6,7 +6,8 @@
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
-import uuid
+import random
+import sys
from couchdb import client
class TempDatabaseMixin(object):
@@ -25,7 +26,12 @@
def temp_db(self):
if self.temp_dbs is None:
self.temp_dbs = {}
- name = 'couchdb-python/' + uuid.uuid4().hex
+ # Find an unused database name
+ while True:
+ name = 'couchdb-python/%d' % random.randint(0, sys.maxint)
+ if name not in self.temp_dbs:
+ break
+ print '%s already used' % name
db = self.server.create(name)
self.temp_dbs[name] = db
return name, db
|
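The retry loop mirrors what uuid gave for free, at the cost of a (vanishingly unlikely) collision check against names this test run already created. The same idea on Python 3, where sys.maxint became sys.maxsize:

import random
import sys

taken = {'couchdb-python/42'}  # names already created in this run (illustrative)

while True:
    name = 'couchdb-python/%d' % random.randint(0, sys.maxsize)
    if name not in taken:
        break  # unused name found
print(name)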
7420030ef8253580942412c479f2868ea7091eaa
|
config-example.py
|
config-example.py
|
"""
Minimal config file for kahvibot. Just define values as normal Python code.
"""
# put your bot token here as a string
bot_token = ""
# the tg username of the bot's admin.
admin_username = ""
# if a message contains any of these words, the bot responds
trigger_words = [
"kahvi",
"\u2615", # coffee emoji
"tsufe",
"kahavi",
#"sima", # wappu mode
]
|
"""
Minimal config file for kahvibot. Just define values as normal Python code.
"""
# put your bot token here as a string
bot_token = ""
# the tg username of the bot's admin.
admin_username = ""
# The size of the pictures the webcamera takes. As of 2022-03-06, the guild
# room has a Creative Live! Cam Sync HD USB webcamera, which at least claims to
# be 720p
camera_dimensions = (1280, 720)
# if a message contains any of these words, the bot responds
trigger_words = [
"kahvi",
"\u2615", # coffee emoji
"tsufe",
"kahavi",
#"sima", # wappu mode
]
|
Add camera image dimensions to config
|
Add camera image dimensions to config
|
Python
|
mit
|
mgunyho/kiltiskahvi
|
---
+++
@@ -8,6 +8,11 @@
# the tg username of the bot's admin.
admin_username = ""
+# The size of the pictures the webcamera takes. As of 2022-03-06, the guild
+# room has a Creative Live! Cam Sync HD USB webcamera, which at least claims to
+# be 720p
+camera_dimensions = (1280, 720)
+
# if a message contains any of these words, the bot responds
trigger_words = [
"kahvi",
|
456b72757cda81c8dd6634ae41b8a1008ff59087
|
config-example.py
|
config-example.py
|
"""
Minimal config file for kahvibot. Just define values as normal Python code.
"""
# put your bot token here as a string
bot_token = ""
# the tg username of the bot's admin.
admin_username = ""
# The size of the pictures the webcamera takes. As of 2022-03-06, the guild
# room has a Creative Live! Cam Sync HD USB webcamera, which at least claims to
# be 720p
camera_dimensions = (1280, 720)
# if a message contains any of these words, the bot responds
trigger_words = [
"kahvi",
"\u2615", # coffee emoji
"tsufe",
"kahavi",
#"sima", # wappu mode
]
|
"""
Minimal config file for kahvibot. Just define values as normal Python code.
"""
# put your bot token here as a string
bot_token = ""
# the tg username of the bot's admin.
admin_username = ""
# The size of the pictures the webcamera takes. As of 2022-03-06, the guild
# room has a Creative Live! Cam Sync HD USB webcamera, which at least claims to
# be 720p
camera_dimensions = (1280, 720)
# Use this picture as a watermark, for sponsorships etc. Should be a PNG image
# with transparency. It is overlaid directly with the camera image, so it
# should have the same dimensions as `camera_dimensions` above. Leave as an
# empty string to have no watermark.
watermark_path = ""
# if a message contains any of these words, the bot responds
trigger_words = [
"kahvi",
"\u2615", # coffee emoji
"tsufe",
"kahavi",
#"sima", # wappu mode
]
|
Add watermark path to example config
|
Add watermark path to example config
|
Python
|
mit
|
mgunyho/kiltiskahvi
|
---
+++
@@ -13,6 +13,14 @@
# be 720p
camera_dimensions = (1280, 720)
+
+# Use this picture as a watermark, for sponsorships etc. Should be a PNG image
+# with transparency. It is overlaid directly with the camera image, so it
+# should have the same dimensions as `camera_dimensions` above. Leave as an
+# empty string to have no watermark.
+watermark_path = ""
+
+
# if a message contains any of these words, the bot responds
trigger_words = [
"kahvi",
|
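The config only stores the path; one plausible way to apply such a watermark with Pillow, assuming the PNG matches camera_dimensions as the comment requires (file names here are illustrative):

from PIL import Image

camera_dimensions = (1280, 720)

base = Image.open('snapshot.jpg').convert('RGBA')   # camera frame
mark = Image.open('watermark.png').convert('RGBA')  # transparent overlay
assert mark.size == camera_dimensions == base.size

# alpha_composite honours the watermark's transparency channel
combined = Image.alpha_composite(base, mark)
combined.convert('RGB').save('snapshot_marked.jpg')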
bbe765d404ff756e5a8cc828e6aa744dd6228285
|
djlint/analyzers/context_processors.py
|
djlint/analyzers/context_processors.py
|
import ast
from .base import BaseAnalyzer, ModuleVisitor, Result
class ContextProcessorsVisitor(ast.NodeVisitor):
def __init__(self):
self.found = []
deprecated_items = {
'django.core.context_processors.auth':
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.PermWrapper':
'django.contrib.auth.context_processors.PermWrapper',
'django.core.context_processors.PermLookupDict':
'django.contrib.auth.context_processors.PermLookupDict',
}
def visit_Str(self, node):
if node.s in self.deprecated_items.keys():
self.found.append((node.s, node))
class ContextProcessorsAnalyzer(BaseAnalyzer):
def analyze_file(self, filepath, code):
if not isinstance(code, ast.AST):
return
visitor = ContextProcessorsVisitor()
visitor.visit(code)
for name, node in visitor.found:
propose = visitor.deprecated_items[name]
result = Result(
description = (
'%r function is deprecated, use %r instead' % (name, propose)
),
path = filepath,
line = node.lineno)
lines = self.get_file_lines(filepath, node.lineno, node.lineno)
for lineno, important, text in lines:
result.source.add_line(lineno, text, important)
result.solution.add_line(lineno, text.replace(name, propose), important)
yield result
|
import ast
from .base import BaseAnalyzer, ModuleVisitor, Result
class ContextProcessorsVisitor(ast.NodeVisitor):
def __init__(self):
self.found = []
removed_items = {
'django.core.context_processors.auth':
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.PermWrapper':
'django.contrib.auth.context_processors.PermWrapper',
'django.core.context_processors.PermLookupDict':
'django.contrib.auth.context_processors.PermLookupDict',
}
def visit_Str(self, node):
if node.s in self.removed_items.keys():
self.found.append((node.s, node))
class ContextProcessorsAnalyzer(BaseAnalyzer):
def analyze_file(self, filepath, code):
if not isinstance(code, ast.AST):
return
visitor = ContextProcessorsVisitor()
visitor.visit(code)
for name, node in visitor.found:
propose = visitor.removed_items[name]
result = Result(
description = (
'%r function is removed in Django >=1.4, use %r instead'
% (name, propose)
),
path = filepath,
line = node.lineno)
lines = self.get_file_lines(filepath, node.lineno, node.lineno)
for lineno, important, text in lines:
result.source.add_line(lineno, text, important)
result.solution.add_line(lineno, text.replace(name, propose), important)
yield result
|
Update context processors analyzer to target Django 1.4
|
Update context processors analyzer to target Django 1.4
|
Python
|
isc
|
alfredhq/djlint
|
---
+++
@@ -8,7 +8,7 @@
def __init__(self):
self.found = []
- deprecated_items = {
+ removed_items = {
'django.core.context_processors.auth':
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.PermWrapper':
@@ -18,7 +18,7 @@
}
def visit_Str(self, node):
- if node.s in self.deprecated_items.keys():
+ if node.s in self.removed_items.keys():
self.found.append((node.s, node))
@@ -30,10 +30,11 @@
visitor = ContextProcessorsVisitor()
visitor.visit(code)
for name, node in visitor.found:
- propose = visitor.deprecated_items[name]
+ propose = visitor.removed_items[name]
result = Result(
description = (
- '%r function is deprecated, use %r instead' % (name, propose)
+ '%r function is removed in Django >=1.4, use %r instead'
+ % (name, propose)
),
path = filepath,
line = node.lineno)
|
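The analyzer works by walking the module's AST and collecting matching string literals. A self-contained sketch of the same pattern (on Python 3.8+ string literals surface as ast.Constant, so visit_Str matches the older AST this tool targets):

import ast

code = "TEMPLATE_CONTEXT_PROCESSORS = ('django.core.context_processors.auth',)"

class StringFinder(ast.NodeVisitor):
    def __init__(self):
        self.found = []

    def visit_Str(self, node):
        self.found.append((node.s, node.lineno))

finder = StringFinder()
finder.visit(ast.parse(code))
print(finder.found)  # pre-3.8: [('django.core.context_processors.auth', 1)]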
f95754249f3ffa364def26741b7a875521d7dec1
|
src/main/translator-xml/XMLTranslator.py
|
src/main/translator-xml/XMLTranslator.py
|
#!/usr/bin/env python
import sys
from xml.dom import minidom
class XMLTranslator:
# Parse any other node of the PML file
def parse_nodes(self, nodes, depth, processes_sofar, process_current, resources_sofar):
pass
# Parse Process, the outermost level of a PML file
def parse_process(self, node):
processes = [] # List of Promela proctypes
resources = [] # List of resources
procname = node.getElementsByTagName("ID")[0].getAttribute("value")
process_main = ["active proctype " + procname + "()", "{"]
processes.append(process_main)
# Parse inner tree nodes
self.parse_nodes(node.childNodes, 0, processes, process_main, resources)
process_main.append("}")
# Assemble resources and processes into translation
translation = []
'''
for resource in resources: # FIXME: not sure this is where resources should be going - scoping?
translation.append(resource)
translation.append("")
'''
for process in processes:
for line in process:
translation.append(line)
return translation
def translate_xml(self, xml_string):
xml_tree = minidom.parseString(xml_string)
print xml_tree.toxml()
translation = self.parse_process(xml_tree)
return translation
|
#!/usr/bin/env python
from xml.dom import minidom
class XMLTranslator:
# Parse any other node of the PML file
def parse_nodes(self, nodes, depth, processes_sofar, process_current, resources_sofar):
pass
# Parse Process, the outermost level of a PML file
def parse_process(self, node):
processes = [] # List of Promela proctypes
resources = [] # List of resources
procname = node.getElementsByTagName("ID")[0].getAttribute("value")
process_main = ["active proctype " + procname + "()", "{"]
processes.append(process_main)
# Parse inner tree nodes
self.parse_nodes(node.childNodes, 0, processes, process_main, resources)
process_main.append("}")
# Assemble resources and processes into translation
translation = []
'''
for resource in resources: # FIXME: not sure this is where resources should be going - scoping?
translation.append(resource)
translation.append("")
'''
for process in processes:
for line in process:
translation.append(line)
return translation
def translate_xml(self, xml_string):
xml_tree = minidom.parseString(xml_string)
print xml_tree.toxml()
translation = self.parse_process(xml_tree)
return translation
|
Change indentation to conform to PEP8
|
Change indentation to conform to PEP8
|
Python
|
mit
|
CS4098/GroupProject,CS4098/GroupProject,CS4098/GroupProject
|
---
+++
@@ -1,43 +1,41 @@
#!/usr/bin/env python
-import sys
from xml.dom import minidom
class XMLTranslator:
+ # Parse any other node of the PML file
+ def parse_nodes(self, nodes, depth, processes_sofar, process_current, resources_sofar):
+ pass
- # Parse any other node of the PML file
- def parse_nodes(self, nodes, depth, processes_sofar, process_current, resources_sofar):
- pass
+ # Parse Process, the outermost level of a PML file
+ def parse_process(self, node):
+ processes = [] # List of Promela proctypes
+ resources = [] # List of resources
- # Parse Process, the outermost level of a PML file
- def parse_process(self, node):
- processes = [] # List of Promela proctypes
- resources = [] # List of resources
+ procname = node.getElementsByTagName("ID")[0].getAttribute("value")
+ process_main = ["active proctype " + procname + "()", "{"]
+ processes.append(process_main)
- procname = node.getElementsByTagName("ID")[0].getAttribute("value")
- process_main = ["active proctype " + procname + "()", "{"]
- processes.append(process_main)
+ # Parse inner tree nodes
+ self.parse_nodes(node.childNodes, 0, processes, process_main, resources)
- # Parse inner tree nodes
- self.parse_nodes(node.childNodes, 0, processes, process_main, resources)
+ process_main.append("}")
- process_main.append("}")
+ # Assemble resources and processes into translation
+ translation = []
+ '''
+ for resource in resources: # FIXME: not sure this is where resources should be going - scoping?
+ translation.append(resource)
+ translation.append("")
+ '''
+ for process in processes:
+ for line in process:
+ translation.append(line)
- # Assemble resources and processes into translation
- translation = []
- '''
- for resource in resources: # FIXME: not sure this is where resources should be going - scoping?
- translation.append(resource)
- translation.append("")
- '''
- for process in processes:
- for line in process:
- translation.append(line)
+ return translation
- return translation
-
- def translate_xml(self, xml_string):
- xml_tree = minidom.parseString(xml_string)
- print xml_tree.toxml()
- translation = self.parse_process(xml_tree)
- return translation
+ def translate_xml(self, xml_string):
+ xml_tree = minidom.parseString(xml_string)
+ print xml_tree.toxml()
+ translation = self.parse_process(xml_tree)
+ return translation
|
9ea98f37ca4c1ea00fd6c77d5a651b4a928a237d
|
fix_past_due_issue.py
|
fix_past_due_issue.py
|
import sys
from datetime import datetime
from courtutils.databases.postgres import PostgresDatabase
from courtreader import readers
from courtutils.logger import get_logger
log = get_logger()
reader = readers.DistrictCourtReader()
reader.connect()
db = PostgresDatabase('district')
def update_case(fips):
cases_to_fix = db.get_cases_with_no_past_due(fips, 'criminal')
for case_to_fix in cases_to_fix:
case = {
'fips': fips,
'case_number': case_to_fix[0],
'details_fetched_for_hearing_date': case_to_fix[1],
'collected': datetime.now()
}
case['details'] = reader.get_case_details_by_number(
fips, 'criminal', case_to_fix[0],
case['details_url'] if 'details_url' in case else None)
if 'error' in case['details']:
log.warn('Could not collect case details for %s in %s',
case_to_fix[0], case['fips'])
else:
log.info('%s %s', fips, case['details']['CaseNumber'])
db.replace_case_details(case, 'criminal')
if len(sys.argv) > 2:
update_case(sys.argv[2])
else:
courts = list(db.get_courts())
for court in courts:
update_case(court['fips'])
|
import sys
from datetime import datetime
from courtutils.databases.postgres import PostgresDatabase
from courtreader import readers
from courtutils.logger import get_logger
log = get_logger()
reader = readers.DistrictCourtReader()
reader.connect()
db = PostgresDatabase('district')
def update_case(fips):
cases_to_fix = db.get_cases_with_no_past_due(fips, 'criminal')
for case_to_fix in cases_to_fix:
case = {
'fips': fips,
'case_number': case_to_fix[0],
'details_fetched_for_hearing_date': case_to_fix[1],
'collected': datetime.now()
}
case['details'] = reader.get_case_details_by_number(
fips, 'criminal', case_to_fix[0],
case['details_url'] if 'details_url' in case else None)
if 'error' in case['details']:
log.warn('Could not collect case details for %s in %s',
case_to_fix[0], case['fips'])
else:
log.info('%s %s', fips, case['details']['CaseNumber'])
db.replace_case_details(case, 'criminal')
if len(sys.argv) > 1:
update_case(sys.argv[1])
else:
courts = list(db.get_courts())
for court in courts:
update_case(court['fips'])
|
Fix mistake in last commit
|
Fix mistake in last commit
|
Python
|
mit
|
bschoenfeld/va-court-scraper,bschoenfeld/va-court-scraper
|
---
+++
@@ -28,8 +28,8 @@
log.info('%s %s', fips, case['details']['CaseNumber'])
db.replace_case_details(case, 'criminal')
-if len(sys.argv) > 2:
- update_case(sys.argv[2])
+if len(sys.argv) > 1:
+ update_case(sys.argv[1])
else:
courts = list(db.get_courts())
for court in courts:
|
26369658ffab0a2672129a1595d0b7b6ab7d49f1
|
django/santropolFeast/member/tests.py
|
django/santropolFeast/member/tests.py
|
from django.test import TestCase
from member.models import Member
from datetime import date
class MemberTestCase(TestCase):
def setUp(self):
Member.objects.create(firstname='Katrina', birthdate=date(1980, 4, 19))
def test_age_on_date(self):
"""The age on given date is properly computed"""
katrina = Member.objects.get(firstname='Katrina')
self.assertEqual(katrina.age_on_date(date(2016, 4, 19)), 36)
self.assertEqual(katrina.age_on_date(date(1950, 4, 19)), 0)
self.assertEqual(katrina.age_on_date(katrina.birthdate), 0)
|
from django.test import TestCase
from member.models import Member
from datetime import date
class MemberTestCase(TestCase):
def setUp(self):
Member.objects.create(firstname='Katrina', birthdate=date(1980, 4, 19))
def test_age_on_date(self):
"""The age on given date is properly computed"""
katrina = Member.objects.get(firstname='Katrina')
self.assertEqual(katrina.age_on_date(date(2016, 4, 19)), 36)
self.assertEqual(katrina.age_on_date(date(1950, 4, 19)), 0)
self.assertEqual(katrina.age_on_date(katrina.birthdate), 0)
|
Fix blank lines for PEP-8
|
Fix blank lines for PEP-8
|
Python
|
agpl-3.0
|
savoirfairelinux/santropol-feast,madmath/sous-chef,savoirfairelinux/sous-chef,savoirfairelinux/sous-chef,madmath/sous-chef,savoirfairelinux/sous-chef,savoirfairelinux/santropol-feast,savoirfairelinux/santropol-feast,madmath/sous-chef
|
---
+++
@@ -1,6 +1,7 @@
from django.test import TestCase
from member.models import Member
from datetime import date
+
class MemberTestCase(TestCase):
|
0af76c93eab508ca93228ce902427df35ff34bca
|
microscopes/lda/runner.py
|
microscopes/lda/runner.py
|
"""Implements the Runner interface fo LDA
"""
from microscopes.common import validator
from microscopes.common.rng import rng
from microscopes.lda.kernels import lda_crp_gibbs
from microscopes.lda.kernels import lda_sample_dispersion
class runner(object):
"""The LDA runner
Parameters
----------
defn : ``model_definition``
The structural definition.
view : dataview
A list of list of serializable objects (the 'documents')
latent : ``state``
The initialization state.
kernel_config : list
"""
def __init__(self, defn, view, latent, kernel_config='assign'):
self._defn = defn
self._view = view
self._latent = latent
def run(self, r, niters=10000):
"""Run the specified lda kernel for `niters`, in a single
thread.
Parameters
----------
r : random state
niters : int
"""
validator.validate_type(r, rng, param_name='r')
validator.validate_positive(niters, param_name='niters')
for _ in xrange(niters):
lda_crp_gibbs(self._latent, r)
lda_sample_dispersion(self._latent, r)
|
"""Implements the Runner interface fo LDA
"""
from microscopes.common import validator
from microscopes.common.rng import rng
from microscopes.lda.kernels import lda_crp_gibbs
from microscopes.lda.kernels import lda_sample_dispersion
class runner(object):
"""The LDA runner
Parameters
----------
defn : ``model_definition``
The structural definition.
view : dataview
A list of list of serializable objects (the 'documents')
latent : ``state``
The initialization state.
kernel_config : list
"""
def __init__(self, defn, view, latent, kernel_config='assign'):
self._defn = defn
self._view = view
self._latent = latent
def run(self, r, niters=10000):
"""Run the specified lda kernel for `niters`, in a single
thread.
Parameters
----------
r : random state
niters : int
"""
validator.validate_type(r, rng, param_name='r')
validator.validate_positive(niters, param_name='niters')
for _ in xrange(niters):
lda_crp_gibbs(self._latent, r)
|
Disable hyperparam inference for now
|
Disable hyperparam inference for now
|
Python
|
bsd-3-clause
|
datamicroscopes/lda,datamicroscopes/lda,datamicroscopes/lda
|
---
+++
@@ -46,4 +46,3 @@
for _ in xrange(niters):
lda_crp_gibbs(self._latent, r)
- lda_sample_dispersion(self._latent, r)
|
2909374a77ac3cd5e2247dda3433c520ad043c71
|
nova/__init__.py
|
nova/__init__.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Required to play nicely with namespace composition (PEP420).
__import__('pkg_resources').declare_namespace(__name__)
|
Allow compute driver to load correctly
|
Allow compute driver to load correctly
In certain environments the load order can be confused between nova
and zun. This can cause boot issues where 'No module named cmd.compute'
can be thrown because the python code is looking in zun/nova for that
driver.
This change extends the declare_namespace code to allow for the other
projects to properly load.
Similar fix: https://review.openstack.org/#/c/311724/
Closes-Bug: #1650107
Change-Id: Ibb12f73a378dcc30a0483422226f57d52b53fde9
|
Python
|
apache-2.0
|
kevin-zhaoshuai/zun,kevin-zhaoshuai/zun,kevin-zhaoshuai/zun
|
---
+++
@@ -0,0 +1,14 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Required to play nicely with namespace composition (PEP420).
+__import__('pkg_resources').declare_namespace(__name__)
|
|
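For the declare_namespace line to work, every distribution sharing the nova namespace ships the same one-line __init__.py and lists the package in its setup metadata. A sketch of the contributing project's setup.py, with illustrative name and version:

from setuptools import setup, find_packages

setup(
    name='example-nova-driver',  # illustrative project metadata
    version='0.1',
    packages=find_packages(),
    namespace_packages=['nova'],
)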
4b222e60b5ae6ff9c3390c033356c303e8af3900
|
h2o-py/tests/testdir_munging/pyunit_length.py
|
h2o-py/tests/testdir_munging/pyunit_length.py
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def length_check():
# Connect to a pre-existing cluster
frame = h2o.import_file(path=h2o.locate("smalldata/junit/cars_trim.csv"), col_types=["string","numeric","numeric","numeric","numeric","numeric","numeric","numeric"])
# single column (frame)
length_frame = frame["name"].length()
assert length_frame[0,0] == 28, "Expected 28, but got {}".format(length_frame[0,0])
assert length_frame[1,0] == 27, "Expected 27, but got {}".format(length_frame[1,0])
assert length_frame[2,0] == 19, "Expected 19, but got {}".format(length_frame[2,0])
# single column (vec)
vec = frame["name"]
trimmed_vec = vec.trim()
length_vec = trimmed_vec.length()
assert length_vec[0,0] == 23, "Expected 23, but got {}".format(length_vec[0,0])
assert length_vec[1,0] == 18, "Expected 18, but got {}".format(length_vec[1,0])
assert length_vec[2,0] == 18, "Expected 18, but got {}".format(length_vec[2,0])
if __name__ == "__main__":
tests.run_test(sys.argv, length_check)
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def length_check():
# Connect to a pre-existing cluster
frame = h2o.import_file(path=h2o.locate("smalldata/junit/cars_trim.csv"), col_types=["string","numeric","numeric","numeric","numeric","numeric","numeric","numeric"])
# single column (frame)
length_frame = frame["name"].length()
assert length_frame[0,0] == 26, "Expected 26, but got {}".format(length_frame[0,0])
assert length_frame[1,0] == 19, "Expected 19, but got {}".format(length_frame[1,0])
assert length_frame[2,0] == 19, "Expected 19, but got {}".format(length_frame[2,0])
# single column (vec)
vec = frame["name"]
trimmed_vec = vec.trim()
length_vec = trimmed_vec.length()
assert length_vec[0,0] == 23, "Expected 23, but got {}".format(length_vec[0,0])
assert length_vec[1,0] == 18, "Expected 18, but got {}".format(length_vec[1,0])
assert length_vec[2,0] == 18, "Expected 18, but got {}".format(length_vec[2,0])
if __name__ == "__main__":
tests.run_test(sys.argv, length_check)
|
Update test to reflect spaces without quotes in dataset.
|
Update test to reflect spaces without quotes in dataset.
|
Python
|
apache-2.0
|
madmax983/h2o-3,YzPaul3/h2o-3,YzPaul3/h2o-3,madmax983/h2o-3,kyoren/https-github.com-h2oai-h2o-3,madmax983/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,mathemage/h2o-3,mathemage/h2o-3,spennihana/h2o-3,pchmieli/h2o-3,michalkurka/h2o-3,brightchen/h2o-3,pchmieli/h2o-3,spennihana/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,pchmieli/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,h2oai/h2o-dev,michalkurka/h2o-3,kyoren/https-github.com-h2oai-h2o-3,brightchen/h2o-3,michalkurka/h2o-3,YzPaul3/h2o-3,madmax983/h2o-3,spennihana/h2o-3,madmax983/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,YzPaul3/h2o-3,brightchen/h2o-3,YzPaul3/h2o-3,michalkurka/h2o-3,brightchen/h2o-3,spennihana/h2o-3,kyoren/https-github.com-h2oai-h2o-3,jangorecki/h2o-3,kyoren/https-github.com-h2oai-h2o-3,mathemage/h2o-3,pchmieli/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,YzPaul3/h2o-3,h2oai/h2o-3,mathemage/h2o-3,brightchen/h2o-3,pchmieli/h2o-3,kyoren/https-github.com-h2oai-h2o-3,h2oai/h2o-3,michalkurka/h2o-3,brightchen/h2o-3,michalkurka/h2o-3,madmax983/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,h2oai/h2o-3,pchmieli/h2o-3,YzPaul3/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,madmax983/h2o-3,pchmieli/h2o-3,jangorecki/h2o-3,kyoren/https-github.com-h2oai-h2o-3,kyoren/https-github.com-h2oai-h2o-3,h2oai/h2o-3,spennihana/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,mathemage/h2o-3,brightchen/h2o-3
|
---
+++
@@ -9,8 +9,8 @@
# single column (frame)
length_frame = frame["name"].length()
- assert length_frame[0,0] == 28, "Expected 28, but got {}".format(length_frame[0,0])
- assert length_frame[1,0] == 27, "Expected 27, but got {}".format(length_frame[1,0])
+ assert length_frame[0,0] == 26, "Expected 26, but got {}".format(length_frame[0,0])
+ assert length_frame[1,0] == 19, "Expected 19, but got {}".format(length_frame[1,0])
assert length_frame[2,0] == 19, "Expected 19, but got {}".format(length_frame[2,0])
# single column (vec)
|
b2e26c044e9d5890945e01364d62f814fcd07949
|
test/unit/interfaces/test_group_dicom.py
|
test/unit/interfaces/test_group_dicom.py
|
import os
from nose.tools import (assert_equal, assert_in, assert_true)
from ...helpers.logging import logger
from qipipe.interfaces import GroupDicom
from ... import ROOT
from ...helpers.logging import logger
# The test fixture.
FIXTURE = os.path.join(ROOT, 'fixtures', 'staging', 'breast', 'BreastChemo3',
'Visit1')
class TestGroupDicom(object):
"""GroupDicom interface unit tests."""
def test_group_dicom(self):
logger(__name__).debug("Testing the GroupDicom interface on %s..."
% FIXTURE)
grouper = GroupDicom(tag='SeriesNumber', in_files=FIXTURE)
result = grouper.run()
ser_dict = result.outputs.series_files_dict
assert_true(not not ser_dict, "GroupDicom did not group the files")
for series in [7, 33]:
assert_in(series, ser_dict, "GroupDicom did not group the"
" series %d" % series)
assert_equal(len(ser_dict[series]), 1, "Too many DICOM files were"
" grouped in series %d: %d" %
(series, len(ser_dict[series])))
logger(__name__).debug("GroupDicom interface test completed")
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
import os
from nose.tools import (assert_equal, assert_in, assert_true)
from ...helpers.logging import logger
from qipipe.interfaces import GroupDicom
from ... import ROOT
from ...helpers.logging import logger
# The test fixture.
FIXTURE = os.path.join(ROOT, 'fixtures', 'staging', 'breast', 'BreastChemo3',
'Visit1')
class TestGroupDicom(object):
"""GroupDicom interface unit tests."""
def test_group_dicom(self):
logger(__name__).debug("Testing the GroupDicom interface on %s..."
% FIXTURE)
grouper = GroupDicom(tag='AcquisitionNumber', in_files=FIXTURE)
result = grouper.run()
grp_dict = result.outputs.groups
assert_true(not not grp_dict, "GroupDicom did not group the files")
for volume in [1, 14]:
assert_in(volume, grp_dict, "GroupDicom did not group the volume %d"
% volume)
assert_equal(len(grp_dict[volume]), 1, "Too many DICOM files were"
" grouped into volume %d: %d" %
(volume, len(grp_dict[volume])))
logger(__name__).debug("GroupDicom interface test completed")
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
Test the volume rather than series.
|
Test the volume rather than series.
|
Python
|
bsd-2-clause
|
ohsu-qin/qipipe
|
---
+++
@@ -17,19 +17,21 @@
def test_group_dicom(self):
logger(__name__).debug("Testing the GroupDicom interface on %s..."
% FIXTURE)
- grouper = GroupDicom(tag='SeriesNumber', in_files=FIXTURE)
+ grouper = GroupDicom(tag='AcquisitionNumber', in_files=FIXTURE)
result = grouper.run()
- ser_dict = result.outputs.series_files_dict
- assert_true(not not ser_dict, "GroupDicom did not group the files")
- for series in [7, 33]:
- assert_in(series, ser_dict, "GroupDicom did not group the"
- " series %d" % series)
- assert_equal(len(ser_dict[series]), 1, "Too many DICOM files were"
- " grouped in series %d: %d" %
- (series, len(ser_dict[series])))
+ grp_dict = result.outputs.groups
+ assert_true(not not grp_dict, "GroupDicom did not group the files")
+ for volume in [1, 14]:
+ assert_in(volume, grp_dict, "GroupDicom did not group the volume %d"
+ % volume)
+ assert_equal(len(grp_dict[volume]), 1, "Too many DICOM files were"
+ " grouped into volume %d: %d" %
+ (volume, len(grp_dict[volume])))
logger(__name__).debug("GroupDicom interface test completed")
+
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
+
|
7ed78836d1389a9a3998d154b08c0f8e331d3e87
|
inthe_am/taskmanager/viewsets/activity_log.py
|
inthe_am/taskmanager/viewsets/activity_log.py
|
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .. import models
from ..serializers.activity_log import ActivityLogSerializer
class ActivityLogViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticatedOrReadOnly, )
serializer_class = ActivityLogSerializer
def get_queryset(self):
return models.TaskStoreActivityLog.objects.filter(
store__user=self.request.user
).order_by('-last_seen')
|
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .. import models
from ..serializers.activity_log import ActivityLogSerializer
class ActivityLogViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticatedOrReadOnly, )
serializer_class = ActivityLogSerializer
def get_queryset(self):
if not self.request.user.is_authenticated():
return models.TaskStoreActivityLog.objects.none()
return models.TaskStoreActivityLog.objects.filter(
store__user=self.request.user
).order_by('-last_seen')
|
Return an empty activity log list for unauthenticated users.
|
Return an empty activity log list for unauthenticated users.
|
Python
|
agpl-3.0
|
coddingtonbear/inthe.am,coddingtonbear/inthe.am,coddingtonbear/inthe.am,coddingtonbear/inthe.am,coddingtonbear/inthe.am
|
---
+++
@@ -10,6 +10,8 @@
serializer_class = ActivityLogSerializer
def get_queryset(self):
+ if not self.request.user.is_authenticated():
+ return models.TaskStoreActivityLog.objects.none()
return models.TaskStoreActivityLog.objects.filter(
store__user=self.request.user
).order_by('-last_seen')
|
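One portability note: is_authenticated is callable here because the project targets Django < 1.10; on 1.10+ it is a property, so the same guard would read as in this sketch:

def get_queryset(self):
    # Django >= 1.10: is_authenticated is a property, not a method
    if not self.request.user.is_authenticated:
        return models.TaskStoreActivityLog.objects.none()
    return models.TaskStoreActivityLog.objects.filter(
        store__user=self.request.user
    ).order_by('-last_seen')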
63ad1bc8f237a90975c7fa883143021faa679efd
|
pkit/__init__.py
|
pkit/__init__.py
|
version = (0, 1, 0)
__title__ = "Process Kit"
__author__ = "Oleiade"
__license__ = "MIT"
__version__ = '.'.join(map(str, version))
from pkit.process import Process
|
version = (0, 1, 0)
__title__ = "Process Kit"
__author__ = "Oleiade"
__license__ = "MIT"
__version__ = '.'.join(map(str, version))
|
Add a wait option to Process.terminate
|
Add a wait option to Process.terminate
|
Python
|
mit
|
botify-labs/process-kit
|
---
+++
@@ -5,5 +5,3 @@
__license__ = "MIT"
__version__ = '.'.join(map(str, version))
-
-from pkit.process import Process
|
4166bf21aa8ff9264724ef8101231557f40b80ef
|
production.py
|
production.py
|
from flask import Flask, render_template, jsonify, make_response, request, current_app
from gevent import monkey
from gevent import wsgi
import app
monkey.patch_all()
app = Flask(__name__)
server = wsgi.WSGIServer(('203.29.62.211', 5050), app)
server.serve_forever()
|
from flask import Flask, render_template, jsonify, make_response, request, current_app
from gevent import monkey
from gevent import wsgi
import app
monkey.patch_all()
app = Flask(__name__)
server = wsgi.WSGIServer(('203.29.62.211', 5050), app)
server.serve_forever()
@app.route('/')
def index():
return render_template('index.html')
|
Add one route so that our monitoring system stops thinking this system is down
|
Add one route so that our monitoring system stops thinking this system is down
|
Python
|
apache-2.0
|
ishgroup/lightbook,ishgroup/lightbook,ishgroup/lightbook
|
---
+++
@@ -8,3 +8,7 @@
server = wsgi.WSGIServer(('203.29.62.211', 5050), app)
server.serve_forever()
+
+@app.route('/')
+def index():
+ return render_template('index.html')
|
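Worth noting about the file above: serve_forever() blocks, so a route defined after it is never registered, and rebinding app shadows the imported module of the same name. A sketch of the intended ordering (gevent.wsgi is gevent.pywsgi in newer gevent releases):

from flask import Flask, render_template
from gevent import monkey
monkey.patch_all()  # patch the stdlib before sockets are created

from gevent import pywsgi  # 'wsgi' in the older gevent used above

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

# register all routes first; serve_forever() never returns
server = pywsgi.WSGIServer(('203.29.62.211', 5050), app)
server.serve_forever()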
8cfdc9ddf14b44ee2deeef42dd990b5313caf2cf
|
src/keybar/api/endpoints/users.py
|
src/keybar/api/endpoints/users.py
|
from allauth.account.forms import SignupForm
from rest_framework.response import Response
from keybar.api.base import Endpoint, ListEndpoint
from keybar.models.user import User
from keybar.serializers.user import UserSerializer
class UserEndpoint(Endpoint):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserListEndpoint(UserEndpoint, ListEndpoint):
pass
class UserRegisterEndpoint(UserEndpoint):
"""Endpoint to register a new user.
TODO:
* Take email verification into account
"""
authentication_classes = ()
permission_classes = ()
def create(self, request, *args, **kwargs):
form = SignupForm(request.data)
if form.is_valid():
user = form.save(request)
return Response(self.serializer_class(user).data)
else:
return Response(form.errors)
|
from allauth.account.forms import SignupForm
from rest_framework.response import Response
from keybar.api.base import Endpoint, ListEndpoint
from keybar.models.user import User
from keybar.serializers.user import UserSerializer
class UserEndpoint(Endpoint):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserListEndpoint(UserEndpoint, ListEndpoint):
pass
class UserRegisterEndpoint(UserEndpoint):
"""Endpoint to register a new user.
This explicitly is a new endpoint because it's unauthenticated.
TODO:
* Take email verification into account
"""
authentication_classes = ()
permission_classes = ()
def create(self, request, *args, **kwargs):
form = SignupForm(request.data)
if form.is_valid():
user = form.save(request)
return Response(self.serializer_class(user).data)
else:
return Response(form.errors)
|
Add note about why register is a separate endpoint.
|
Add note about why register is a separate endpoint.
|
Python
|
bsd-3-clause
|
keybar/keybar
|
---
+++
@@ -18,6 +18,8 @@
class UserRegisterEndpoint(UserEndpoint):
"""Endpoint to register a new user.
+ This explicitly is a new endpoint because it's unauthenticated.
+
TODO:
* Take email verification into account
|
980b7f55968d76b6f9222b7c381e1c98e144ddeb
|
tests/database_tests.py
|
tests/database_tests.py
|
from .query_tests import QueryTestCase
from .sql_builder_tests import SqlBuilderTestCase
from .transaction_tests import TransactionTestCase
from rebel.database import Database
from rebel.exceptions import NotInsideTransaction, MixedPositionalAndNamedArguments
class DatabaseTestCase(QueryTestCase, SqlBuilderTestCase, TransactionTestCase):
def setUp(self):
driver = self.get_driver()
self.db = Database(driver)
self.create_tables()
self.clear_tables()
self.fill_cities()
def fill_cities(self):
self.db.execute("""
INSERT INTO cities (name)
VALUES (?), (?), (?)
""", 'New York', 'Washington', 'Los Angeles')
|
from .query_tests import QueryTestCase
from .sql_builder_tests import SqlBuilderTestCase
from .transaction_tests import TransactionTestCase
from rebel.database import Database
class DatabaseTestCase(QueryTestCase, SqlBuilderTestCase, TransactionTestCase):
def setUp(self):
driver = self.get_driver()
self.db = Database(driver)
self.create_tables()
self.clear_tables()
self.fill_cities()
def fill_cities(self):
self.db.execute("""
INSERT INTO cities (name)
VALUES (?), (?), (?)
""", 'New York', 'Washington', 'Los Angeles')
|
Remove unused imports from database tests
|
Remove unused imports from database tests
|
Python
|
mit
|
hugollm/rebel,hugollm/rebel
|
---
+++
@@ -1,9 +1,7 @@
from .query_tests import QueryTestCase
from .sql_builder_tests import SqlBuilderTestCase
from .transaction_tests import TransactionTestCase
-
from rebel.database import Database
-from rebel.exceptions import NotInsideTransaction, MixedPositionalAndNamedArguments
class DatabaseTestCase(QueryTestCase, SqlBuilderTestCase, TransactionTestCase):
|
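Unused imports like the deleted rebel.exceptions line are what flake8's F401 check reports. A small stdlib-only illustration of the rule; the # noqa form is for imports kept deliberately, e.g. re-exports:

# An import that is referenced needs no special treatment:
import json

# An import kept only for its side effects or for re-export is exempted
# explicitly rather than deleted:
import os  # noqa: F401

print(json.dumps({'ok': True}))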
943856b68531b54e0ec4b34a74c2408311760d23
|
nova/tests/scheduler/__init__.py
|
nova/tests/scheduler/__init__.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
from nova.tests import *
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
Fix and Gate on H303 (no wildcard imports)
|
Fix and Gate on H303 (no wildcard imports)
Wildcard imports make reading code unnecessarily confusing because they
make it harder to see where a function comes from. We had two types of
wildcard imports in the code. Unneeded ones in test files that are just
removed, and some that we actually want which are kept using the '#
noqa' comment to tell flake8 to skip specific checks (such as H303)
for that line.
Change-Id: Id4705011579659fd74a4aaa05ac541e9694c483e
|
Python
|
apache-2.0
|
n0ano/ganttclient
|
---
+++
@@ -14,6 +14,3 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
-# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
-from nova.tests import *
|
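H303 is a check from the OpenStack hacking flake8 plugin; the two cases the commit message describes can be sketched like this (module names are illustrative):

# Disallowed: the reader cannot tell which names this brings into scope.
# from nova.tests import *

# Preferred: explicit imports make every name traceable.
from os.path import exists, join

# Where a star import is genuinely wanted (e.g. test fixtures), the commit
# message's escape hatch is a line-level opt-out:
# from nova.tests import *  # noqa

print(join('/tmp', 'demo'), exists('/tmp'))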
a08fff5946f5faa5d174cf7536bd4e71e6d299a0
|
tests/test_blueprint.py
|
tests/test_blueprint.py
|
import pytest
import broadbean as bb
@pytest.fixture
def virgin_blueprint():
"""
Return an empty instance of BluePrint
"""
return bb.BluePrint()
##################################################
# TEST BARE INITIALISATION
def test_creation(virgin_blueprint):
assert isinstance(virgin_blueprint, bb.BluePrint)
def test_creation_funlist(virgin_blueprint):
assert virgin_blueprint._funlist == []
def test_creation_nameslist(virgin_blueprint):
assert virgin_blueprint._namelist == []
def test_creation_argslist(virgin_blueprint):
assert virgin_blueprint._argslist == []
def test_creation_tslist(virgin_blueprint):
assert virgin_blueprint._tslist == []
|
import pytest
import broadbean as bb
@pytest.fixture
def virgin_blueprint():
"""
Return an empty instance of BluePrint
"""
return bb.BluePrint()
##################################################
# TEST BARE INITIALISATION
def test_creation(virgin_blueprint):
assert isinstance(virgin_blueprint, bb.BluePrint)
@pytest.mark.parametrize("attribute, expected", [('_funlist', []),
('_namelist', []),
('_argslist', []),
('_tslist', [])])
def test_bob(virgin_blueprint, attribute, expected):
assert virgin_blueprint.__getattribute__(attribute) == expected
# def test_creation_funlist(virgin_blueprint):
# assert virgin_blueprint._funlist == []
# def test_creation_nameslist(virgin_blueprint):
# assert virgin_blueprint._namelist == []
# def test_creation_argslist(virgin_blueprint):
# assert virgin_blueprint._argslist == []
# def test_creation_tslist(virgin_blueprint):
# assert virgin_blueprint._tslist == []
#def test_creation_durslist(virgin_blueprint):
# assert virgin_blueprint._durslist == []
#def test_creation_marker1(vi
|
Make the test look fancy
|
refactor: Make the test look fancy
Use fancy decorators to make the simple test look fancy.
|
Python
|
mit
|
WilliamHPNielsen/broadbean
|
---
+++
@@ -9,25 +9,42 @@
"""
return bb.BluePrint()
+
##################################################
# TEST BARE INITIALISATION
-
def test_creation(virgin_blueprint):
assert isinstance(virgin_blueprint, bb.BluePrint)
-def test_creation_funlist(virgin_blueprint):
- assert virgin_blueprint._funlist == []
+@pytest.mark.parametrize("attribute, expected", [('_funlist', []),
+ ('_namelist', []),
+ ('_argslist', []),
+ ('_tslist', [])])
+def test_bob(virgin_blueprint, attribute, expected):
+ assert virgin_blueprint.__getattribute__(attribute) == expected
-def test_creation_nameslist(virgin_blueprint):
- assert virgin_blueprint._namelist == []
-def test_creation_argslist(virgin_blueprint):
- assert virgin_blueprint._argslist == []
+
+# def test_creation_funlist(virgin_blueprint):
+# assert virgin_blueprint._funlist == []
-def test_creation_tslist(virgin_blueprint):
- assert virgin_blueprint._tslist == []
+# def test_creation_nameslist(virgin_blueprint):
+# assert virgin_blueprint._namelist == []
+
+
+# def test_creation_argslist(virgin_blueprint):
+# assert virgin_blueprint._argslist == []
+
+
+# def test_creation_tslist(virgin_blueprint):
+# assert virgin_blueprint._tslist == []
+
+
+#def test_creation_durslist(virgin_blueprint):
+# assert virgin_blueprint._durslist == []
+
+#def test_creation_marker1(vi
|
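A self-contained version of the parametrized pattern, runnable without broadbean (the BluePrint stand-in is illustrative). Note that getattr(obj, name) is the idiomatic spelling of the obj.__getattribute__(name) call used in the new test:

import pytest


class BluePrint(object):
    # Minimal stand-in for bb.BluePrint, just enough to exercise the test.
    def __init__(self):
        self._funlist = []
        self._namelist = []
        self._argslist = []
        self._tslist = []


@pytest.fixture
def virgin_blueprint():
    return BluePrint()


@pytest.mark.parametrize("attribute, expected", [
    ('_funlist', []),
    ('_namelist', []),
    ('_argslist', []),
    ('_tslist', []),
])
def test_empty_attributes(virgin_blueprint, attribute, expected):
    # One parametrized test replaces four copy-pasted ones.
    assert getattr(virgin_blueprint, attribute) == expected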
0ba5b555c4ccb559b5f666e800cc7102b5d9729f
|
rctkdemos/layouts_grid.py
|
rctkdemos/layouts_grid.py
|
from rctkdemos.demos import serve_demo
from rctk.widgets import StaticText
from rctk.layouts import GridLayout
class Demo(object):
title = "Grid"
description = "Demonstrates the Grid using padding and different col/rowspans"
def build(self, tk, parent):
parent.setLayout(GridLayout(columns=3, padx=2, pady=3))
parent.append(StaticText(tk, "Win 1", background="red"), colspan=2)
parent.append(StaticText(tk, "Win 2", background="yellow"))
parent.append(StaticText(tk, "Win 3", background="green"), rowspan=2)
parent.append(StaticText(tk, "Win 4", background="orange"))
parent.append(StaticText(tk, "Win 5", background="blue"))
parent.append(StaticText(tk, "Win 6", background="pink"))
parent.append(StaticText(tk, "Win 7", background="grey"))
parent.append(StaticText(tk, "Win 8", background="brown"), rowspan=2, colspan=2)
parent.layout()
if __name__ == '__main__':
serve_demo(Demo)
|
from rctkdemos.demos import serve_demo, standalone
from rctk.widgets import StaticText
from rctk.layouts import GridLayout
class Demo(object):
title = "Grid"
description = "Demonstrates the Grid using padding and different col/rowspans"
def build(self, tk, parent):
parent.setLayout(GridLayout(columns=3, padx=2, pady=3))
parent.append(StaticText(tk, "Win 1", background="red"), colspan=2)
parent.append(StaticText(tk, "Win 2", background="yellow"))
parent.append(StaticText(tk, "Win 3", background="green"), rowspan=2)
parent.append(StaticText(tk, "Win 4", background="orange"))
parent.append(StaticText(tk, "Win 5", background="blue"))
parent.append(StaticText(tk, "Win 6", background="pink"))
parent.append(StaticText(tk, "Win 7", background="grey"))
parent.append(StaticText(tk, "Win 8", background="brown"), rowspan=2, colspan=2)
parent.layout()
Standalone = standalone(Demo)
if __name__ == '__main__':
serve_demo(Demo)
|
Enable running layout demo standalone
|
Enable running layout demo standalone
git-svn-id: de585c8a1036fae0bde8438f23c67a99526c94d0@627 286bb87c-ec97-11de-a004-2f18c49ebcc3
|
Python
|
bsd-2-clause
|
rctk/demos
|
---
+++
@@ -1,4 +1,4 @@
-from rctkdemos.demos import serve_demo
+from rctkdemos.demos import serve_demo, standalone
from rctk.widgets import StaticText
from rctk.layouts import GridLayout
@@ -18,6 +18,8 @@
parent.append(StaticText(tk, "Win 8", background="brown"), rowspan=2, colspan=2)
parent.layout()
+Standalone = standalone(Demo)
+
if __name__ == '__main__':
serve_demo(Demo)
|
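The change follows a common wrapper pattern: expose a module-level callable that an external runner can import, while keeping the __main__ path for direct execution. A framework-free sketch (this standalone helper is illustrative, not rctk's implementation):

def standalone(demo_cls):
    # Wrap the Demo class in a zero-argument callable a runner can import.
    def run():
        print('running standalone:', demo_cls.title)
    return run


class Demo(object):
    title = "Grid"


Standalone = standalone(Demo)

if __name__ == '__main__':
    Standalone()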
f0c45df83b5fabeefcef5d90fd6084c3ea743995
|
arches/db/migration_operations/extras.py
|
arches/db/migration_operations/extras.py
|
from django.db.migrations.operations.base import Operation
class CreateExtension(Operation):
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % self.name)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("DROP EXTENSION IF EXISTS %s" % self.name)
def describe(self):
return "Creates extension %s" % self.name
|
from django.db.migrations.operations.base import Operation
class CreateExtension(Operation):
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("CREATE EXTENSION IF NOT EXISTS \"%s\"" % self.name)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("DROP EXTENSION IF EXISTS \"%s\"" % self.name)
def describe(self):
return "Creates extension %s" % self.name
|
Add double quotes to sql statement in CreateExtension module.
|
Add double quotes to sql statement in CreateExtension module.
|
Python
|
agpl-3.0
|
cvast/arches,cvast/arches,archesproject/arches,cvast/arches,archesproject/arches,archesproject/arches,cvast/arches,archesproject/arches
|
---
+++
@@ -9,12 +9,10 @@
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
- schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % self.name)
+ schema_editor.execute("CREATE EXTENSION IF NOT EXISTS \"%s\"" % self.name)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
- schema_editor.execute("DROP EXTENSION IF EXISTS %s" % self.name)
+ schema_editor.execute("DROP EXTENSION IF EXISTS \"%s\"" % self.name)
def describe(self):
return "Creates extension %s" % self.name
-
-
|
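The quotes matter because some PostgreSQL extension names contain characters that are invalid in unquoted identifiers, e.g. the hyphen in uuid-ossp. A quick illustration; the %-interpolation is safe here only because the name comes from migration code, not user input:

name = "uuid-ossp"  # a real extension name containing a hyphen

unquoted = "CREATE EXTENSION IF NOT EXISTS %s" % name
quoted = "CREATE EXTENSION IF NOT EXISTS \"%s\"" % name

print(unquoted)  # ... uuid-ossp   -> PostgreSQL parses this as two tokens
print(quoted)    # ... "uuid-ossp" -> a single quoted identifier, valid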
e1e430f74902d653e9c46878a8f254f8feb478ca
|
example/article/models.py
|
example/article/models.py
|
from django.core.urlresolvers import reverse
from django.db import models
from fluent_comments.moderation import moderate_model, comments_are_open, comments_are_moderated
from fluent_comments.models import get_comments_for_model, CommentsRelation
class Article(models.Model):
title = models.CharField("Title", max_length=200)
slug = models.SlugField("Slug", unique=True)
content = models.TextField("Content")
publication_date = models.DateTimeField("Publication date")
enable_comments = models.BooleanField("Enable comments", default=True)
# Optional reverse relation, allow ORM querying:
comments_set = CommentsRelation()
class Meta:
verbose_name = "Article"
verbose_name_plural = "Articles"
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('article-details', kwargs={'slug': self.slug})
# Optional, give direct access to moderation info via the model:
comments = property(get_comments_for_model)
comments_are_open = property(comments_are_open)
comments_are_moderated = property(comments_are_moderated)
# Give the generic app support for moderation by django-fluent-comments:
moderate_model(
Article,
publication_date_field='publication_date',
enable_comments_field='enable_comments'
)
|
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.six import python_2_unicode_compatible
from fluent_comments.moderation import moderate_model, comments_are_open, comments_are_moderated
from fluent_comments.models import get_comments_for_model, CommentsRelation
@python_2_unicode_compatible
class Article(models.Model):
title = models.CharField("Title", max_length=200)
slug = models.SlugField("Slug", unique=True)
content = models.TextField("Content")
publication_date = models.DateTimeField("Publication date")
enable_comments = models.BooleanField("Enable comments", default=True)
# Optional reverse relation, allow ORM querying:
comments_set = CommentsRelation()
class Meta:
verbose_name = "Article"
verbose_name_plural = "Articles"
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('article-details', kwargs={'slug': self.slug})
# Optional, give direct access to moderation info via the model:
comments = property(get_comments_for_model)
comments_are_open = property(comments_are_open)
comments_are_moderated = property(comments_are_moderated)
# Give the generic app support for moderation by django-fluent-comments:
moderate_model(
Article,
publication_date_field='publication_date',
enable_comments_field='enable_comments'
)
|
Fix example Article.__str__ in Python 3
|
Fix example Article.__str__ in Python 3
|
Python
|
apache-2.0
|
django-fluent/django-fluent-comments,django-fluent/django-fluent-comments,edoburu/django-fluent-comments,edoburu/django-fluent-comments,django-fluent/django-fluent-comments,django-fluent/django-fluent-comments,edoburu/django-fluent-comments
|
---
+++
@@ -1,9 +1,12 @@
from django.core.urlresolvers import reverse
from django.db import models
+from django.utils.six import python_2_unicode_compatible
+
from fluent_comments.moderation import moderate_model, comments_are_open, comments_are_moderated
from fluent_comments.models import get_comments_for_model, CommentsRelation
+@python_2_unicode_compatible
class Article(models.Model):
title = models.CharField("Title", max_length=200)
slug = models.SlugField("Slug", unique=True)
@@ -19,7 +22,7 @@
verbose_name = "Article"
verbose_name_plural = "Articles"
- def __unicode__(self):
+ def __str__(self):
return self.title
def get_absolute_url(self):
|
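python_2_unicode_compatible lets a class define __str__ once: on Python 2 the decorator moves it to __unicode__ and installs a UTF-8-encoding __str__; on Python 3 it is a no-op. A minimal sketch using the standalone six package (the commit imports the same helper via Django's vendored copy):

from six import python_2_unicode_compatible


@python_2_unicode_compatible
class Article(object):
    def __init__(self, title):
        self.title = title

    def __str__(self):
        return self.title


print(Article("Hello"))  # behaves identically on Python 2 and 3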
2c7907c6516ded896000dec610bde09f7721915d
|
ckanext/datasetversions/logic/action/create.py
|
ckanext/datasetversions/logic/action/create.py
|
import ckan.logic as logic
from ckan.logic.action.get import package_show as ckan_package_show
from ckan.plugins import toolkit
from ckanext.datasetversions.helpers import get_context
def dataset_version_create(context, data_dict):
id = data_dict.get('id')
parent_name = data_dict.get('base_name')
owner_org = data_dict.get('owner_org')
parent_dict = {
'name': parent_name,
'__parent': True,
}
if owner_org:
parent_dict['owner_org'] = owner_org
parent_dict['private'] = True
else:
parent_dict['private'] = False
parent = _get_or_create_parent_dataset(
context,
parent_dict
)
toolkit.get_action('package_relationship_create')(
get_context(context), {
'subject': id,
'object': parent['id'],
'type': 'child_of',
}
)
def _get_or_create_parent_dataset(context, data_dict):
try:
dataset = ckan_package_show(
get_context(context), {'id': data_dict['name']})
except (logic.NotFound):
dataset = toolkit.get_action('package_create')(
get_context(context), data_dict)
return dataset
|
import ckan.logic as logic
from ckan.logic.action.get import package_show as ckan_package_show
from ckan.plugins import toolkit
from ckanext.datasetversions.helpers import get_context
def dataset_version_create(context, data_dict):
id = data_dict.get('id')
parent_name = data_dict.get('base_name')
owner_org = data_dict.get('owner_org')
parent_dict = {
'name': parent_name,
'type': data_dict.get('type', 'dataset'),
'__parent': True,
}
if owner_org:
parent_dict['owner_org'] = owner_org
parent_dict['private'] = True
else:
parent_dict['private'] = False
parent = _get_or_create_parent_dataset(
context,
parent_dict
)
toolkit.get_action('package_relationship_create')(
get_context(context), {
'subject': id,
'object': parent['id'],
'type': 'child_of',
}
)
def _get_or_create_parent_dataset(context, data_dict):
try:
dataset = ckan_package_show(
get_context(context), {'id': data_dict['name']})
except (logic.NotFound):
dataset = toolkit.get_action('package_create')(
get_context(context), data_dict)
return dataset
|
Create a parent with the same dataset type
|
Create a parent with the same dataset type
|
Python
|
agpl-3.0
|
aptivate/ckanext-datasetversions,aptivate/ckanext-datasetversions,aptivate/ckanext-datasetversions
|
---
+++
@@ -13,6 +13,7 @@
parent_dict = {
'name': parent_name,
+ 'type': data_dict.get('type', 'dataset'),
'__parent': True,
}
|
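The one-line change relies on dict.get with a default: the parent inherits the child's dataset type when one is supplied, and falls back to CKAN's stock 'dataset' type otherwise. In isolation:

child = {'id': 'abc', 'type': 'geodataset'}
print(child.get('type', 'dataset'))  # geodataset -> parent matches the child

child = {'id': 'abc'}
print(child.get('type', 'dataset'))  # dataset    -> default type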
69853e5ef1ef297c776fd23a48b0ac0b2356f06f
|
examples/fantasy/tasks.py
|
examples/fantasy/tasks.py
|
import json
from pathlib import Path
import sys
import sqlalchemy as sa
from invoke import task
FANTASY_DB_SQL = Path.cwd() / 'fantasy-database' / 'schema.sql'
FANTASY_DB_DATA = Path.cwd() / 'fantasy-database' / 'data.json'
@task
def populate_db(ctx, data_file=FANTASY_DB_DATA):
from examples.fantasy import tables
if not Path(data_file).exists():
sys.exit(f'Invalid data file: {data_file}')
with data_file.open() as f:
data = json.load(f)
create_sql = FANTASY_DB_SQL.read_text()
engine = \
sa.create_engine('postgresql://example:somepassword@localhost/example',
echo=True)
conn = engine.connect()
trans = conn.begin()
conn.execute(sa.text(create_sql))
tables_in_order = ('photos', 'stores', 'authors', 'series', 'books',
'chapters', 'books_stores')
try:
for table_name in tables_in_order:
table = getattr(tables, table_name)
values = data[table_name]
for value in values:
query = table.insert().values(value)
conn.execute(query)
trans.commit()
except Exception as exc:
trans.rollback()
raise
print('\nDatabase is successfully populated!')
|
import json
from pathlib import Path
import sys
import sqlalchemy as sa
from invoke import task
FANTASY_DATA_FOLDER = Path(__file__).parent / 'fantasy-database'
@task
def populate_db(ctx, data_folder=FANTASY_DATA_FOLDER, dsn=None):
from examples.fantasy import tables
data_file = data_folder / 'data.json'
if not Path(data_file).exists():
sys.exit(f'Invalid data file: {data_file}')
with data_file.open() as f:
data = json.load(f)
create_sql = (data_folder / 'schema.sql').read_text()
if dsn is None:
dsn = 'postgresql://example:somepassword@localhost/example'
engine = sa.create_engine(dsn, echo=True)
conn = engine.connect()
trans = conn.begin()
conn.execute(sa.text(create_sql))
tables_in_order = ('photos', 'stores', 'authors', 'series', 'books',
'chapters', 'books_stores')
try:
for table_name in tables_in_order:
table = getattr(tables, table_name)
values = data[table_name]
for value in values:
query = table.insert().values(value)
conn.execute(query)
trans.commit()
except Exception as exc:
trans.rollback()
raise
print('\nDatabase is successfully populated!')
|
Refactor populate_db pyinvoke task to use it in tests
|
Refactor populate_db pyinvoke task to use it in tests
|
Python
|
mit
|
vovanbo/aiohttp_json_api
|
---
+++
@@ -5,25 +5,26 @@
import sqlalchemy as sa
from invoke import task
-FANTASY_DB_SQL = Path.cwd() / 'fantasy-database' / 'schema.sql'
-FANTASY_DB_DATA = Path.cwd() / 'fantasy-database' / 'data.json'
+FANTASY_DATA_FOLDER = Path(__file__).parent / 'fantasy-database'
@task
-def populate_db(ctx, data_file=FANTASY_DB_DATA):
+def populate_db(ctx, data_folder=FANTASY_DATA_FOLDER, dsn=None):
from examples.fantasy import tables
+ data_file = data_folder / 'data.json'
if not Path(data_file).exists():
sys.exit(f'Invalid data file: {data_file}')
with data_file.open() as f:
data = json.load(f)
- create_sql = FANTASY_DB_SQL.read_text()
+ create_sql = (data_folder / 'schema.sql').read_text()
- engine = \
- sa.create_engine('postgresql://example:somepassword@localhost/example',
- echo=True)
+ if dsn is None:
+ dsn = 'postgresql://example:somepassword@localhost/example'
+
+ engine = sa.create_engine(dsn, echo=True)
conn = engine.connect()
trans = conn.begin()
|
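The refactor's point is dependency injection: once the data folder and DSN are keyword arguments with defaults, a test can import the task and call it with a temporary folder and a test database. A minimal sketch of the pattern (paths and DSNs are illustrative):

from pathlib import Path

from invoke import Context, task

DATA_FOLDER = Path(__file__).parent / 'data'


@task
def populate(ctx, data_folder=DATA_FOLDER, dsn=None):
    # Fall back to the development DSN only when none is injected.
    if dsn is None:
        dsn = 'postgresql://example:somepassword@localhost/example'
    print('loading', Path(data_folder) / 'data.json', 'into', dsn)


if __name__ == '__main__':
    # What a test would do: call the task directly with its own arguments.
    populate(Context(), data_folder=Path('/tmp'),
             dsn='postgresql://test@localhost/test')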
47540d79fbf3009f1dff27d45f935859460349f9
|
sevenbridges/models/compound/tasks/__init__.py
|
sevenbridges/models/compound/tasks/__init__.py
|
from sevenbridges.models.file import File
def map_input_output(item, api):
"""
Maps item to the appropriate sevenbridges object.
:param item: Input/Output value.
:param api: Api instance.
:return: Mapped object.
"""
if isinstance(item, list):
return [map_input_output(it, api) for it in item]
elif isinstance(item, dict) and 'class' in item:
if item['class'].lower() in ('file', 'directory'):
_secondary_files = []
for _file in item.get('secondaryFiles', []):
_secondary_files.append({'id': _file['path']})
data = {
'id': item['path']
}
if _secondary_files:
data.update({
'_secondary_files': _secondary_files,
'fetched': True
})
return File(api=api, **data)
else:
return item
|
from sevenbridges.models.file import File
def map_input_output(item, api):
"""
Maps item to the appropriate sevenbridges object.
:param item: Input/Output value.
:param api: Api instance.
:return: Mapped object.
"""
if isinstance(item, list):
return [map_input_output(it, api) for it in item]
elif isinstance(item, dict) and 'class' in item:
if item['class'].lower() in ('file', 'directory'):
_secondary_files = []
for _file in item.get('secondaryFiles', []):
_secondary_files.append({'id': _file['path']})
data = {
'id': item['path']
}
data.update({k: item[k] for k in item if k != 'path'})
if _secondary_files:
data.update({
'_secondary_files': _secondary_files,
'fetched': True
})
return File(api=api, **data)
else:
return item
|
Set additional fields when mapping inputs and outputs
|
Set additional fields when mapping inputs and outputs
This will reduce the risk of unnecessary lazy fetching
|
Python
|
apache-2.0
|
sbg/sevenbridges-python
|
---
+++
@@ -19,6 +19,7 @@
data = {
'id': item['path']
}
+ data.update({k: item[k] for k in item if k != 'path'})
if _secondary_files:
data.update({
'_secondary_files': _secondary_files,
|
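The added line is a dict-comprehension merge: every field of item except 'path' (already mapped to 'id') is copied onto the outgoing payload, so the resulting File carries its metadata up front instead of lazily re-fetching it. In isolation:

item = {'path': 'file-123', 'class': 'File', 'size': 2048, 'name': 'reads.fq'}

data = {'id': item['path']}
data.update({k: item[k] for k in item if k != 'path'})

print(data)
# {'id': 'file-123', 'class': 'File', 'size': 2048, 'name': 'reads.fq'}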