commit         stringlengths  40 .. 40
old_file       stringlengths  4 .. 150
new_file       stringlengths  4 .. 150
old_contents   stringlengths  0 .. 3.26k
new_contents   stringlengths  1 .. 4.43k
subject        stringlengths  15 .. 501
message        stringlengths  15 .. 4.06k
lang           stringclasses  4 values
license        stringclasses  13 values
repos          stringlengths  5 .. 91.5k
diff           stringlengths  0 .. 4.35k
462656f9653ae43ea69080414735927b18e0debf
stats/random_walk.py
stats/random_walk.py
import neo4j
import random

from logbook import Logger
log = Logger('trinity.topics')

DEFAULT_DEPTH = 5

NUM_WALKS = 100
# Passed sorted list (desc order), return top nodes
TO_RETURN = lambda x: x[:10]
random.seed()

def random_walk(graph, node, depth=DEFAULT_DEPTH):
    # Pick random neighbor
    neighbors = {}
    i = 0
    for r in node.relationships().outgoing:
        #TODO replace with i + r['count']
        neighbors[(i, i + 1)] = r.getOtherNode(node)
        i += 1
    choice = random.range(i)
    for x,y in neighbors:
        if x <= i and i < y:
            return [node].extend(random_walk(graph, neighbors[(x,y)], depth-1))

def run(graph, index, node):
    nodes = {}
    for i in range(NUM_WALKS):
        with graph.transaction:
            walked_nodes = random_walk(graph, node)
            # Loop through nodes (that aren't the start node), count
            for n in filter(lambda m: m.id != node.id, walked_nodes):
                if nodes.has_key(n):
                    nodes[n]++
                else
                    nodes[n] = 1

    return TO_RETURN(sorted(nodes, key=nodes.__getitem__))

import neo4j
import random

DEFAULT_DEPTH = 5

NUM_WALKS = 100
# Passed sorted list (desc order), return top nodes
TO_RETURN = lambda x: x[:10]
random.seed()

def random_walk(graph, node, depth=DEFAULT_DEPTH):
    if depth == 0:
        return [node]

    # Pick random neighbor
    neighbors = {}
    i = 0
    for r in node.relationships().outgoing:
        neighbors[(i, i + int(r['count']))] = r.getOtherNode(node)
        i += int(r['count'])
    if i == 0:
        # No neighbors
        return [node]
    r = random.randrange(i)
    for x,y in neighbors:
        if x <= r and r < y:
            return [node] + random_walk(graph, neighbors[(x,y)], depth-1)

def run(graph, index, node):
    nodes = {}
    for i in range(NUM_WALKS):
        with graph.transaction:
            walked_nodes = random_walk(graph, node)
            # Loop through nodes (that aren't the start node), count
            for n in filter(lambda m: m.id != node.id, walked_nodes):
                if nodes.has_key(n):
                    nodes[n] += 1
                else:
                    nodes[n] = 1

    return TO_RETURN([{'name': n['name'], 'count': nodes[n]}
                      for n in sorted(nodes, key=nodes.__getitem__)])
Modify random walk so that it works.
Modify random walk so that it works.
Python
mit
peplin/trinity
---
+++
@@ -1,8 +1,5 @@
 import neo4j
 import random
-
-from logbook import Logger
-log = Logger('trinity.topics')
 
 DEFAULT_DEPTH = 5
 
@@ -12,17 +9,22 @@
 random.seed()
 
 def random_walk(graph, node, depth=DEFAULT_DEPTH):
+    if depth == 0:
+        return [node]
+
     # Pick random neighbor
     neighbors = {}
     i = 0
     for r in node.relationships().outgoing:
-        #TODO replace with i + r['count']
-        neighbors[(i, i + 1)] = r.getOtherNode(node)
-        i += 1
-    choice = random.range(i)
+        neighbors[(i, i + int(r['count']))] = r.getOtherNode(node)
+        i += int(r['count'])
+    if i == 0:
+        # No neighbors
+        return [node]
+    r = random.randrange(i)
     for x,y in neighbors:
-        if x <= i and i < y:
-            return [node].extend(random_walk(graph, neighbors[(x,y)], depth-1))
+        if x <= r and r < y:
+            return [node] + random_walk(graph, neighbors[(x,y)], depth-1)
 
 def run(graph, index, node):
     nodes = {}
@@ -32,8 +34,9 @@
             # Loop through nodes (that aren't the start node), count
             for n in filter(lambda m: m.id != node.id, walked_nodes):
                 if nodes.has_key(n):
-                    nodes[n]++
-                else
+                    nodes[n] += 1
+                else:
                     nodes[n] = 1
-    return TO_RETURN(sorted(nodes, key=nodes.__getitem__))
+    return TO_RETURN([{'name': n['name'], 'count': nodes[n]}
+                      for n in sorted(nodes, key=nodes.__getitem__)])
8eed621a15dafc8b0965c59b8da2296f8193d0ca
karabo_data/tests/test_agipd_geometry.py
karabo_data/tests/test_agipd_geometry.py
import numpy as np

from karabo_data.geometry2 import AGIPD_1MGeometry


def test_snap_assemble_data():
    geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
        (-525, 625),
        (-550, -10),
        (520, -160),
        (542.5, 475),
    ])
    snap_geom = geom.snap()
    stacked_data = np.zeros((16, 512, 128))
    img, centre = snap_geom.position_all_modules(stacked_data)
    assert img.shape == (1296, 1132)
    assert tuple(centre) == (651, 570)
    assert np.isnan(img[0, 0])
    assert img[50, 50] == 0

import numpy as np

from karabo_data.geometry2 import AGIPD_1MGeometry


def test_snap_assemble_data():
    geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
        (-525, 625),
        (-550, -10),
        (520, -160),
        (542.5, 475),
    ])
    snap_geom = geom.snap()
    stacked_data = np.zeros((16, 512, 128))
    img, centre = snap_geom.position_all_modules(stacked_data)
    assert img.shape == (1296, 1132)
    assert tuple(centre) == (651, 570)
    assert np.isnan(img[0, 0])
    assert img[50, 50] == 0

def test_write_read_crystfel_file(tmpdir):
    geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
        (-525, 625),
        (-550, -10),
        (520, -160),
        (542.5, 475),
    ])
    path = str(tmpdir / 'test.geom')
    geom.write_crystfel_geom(path)

    # We need to add some experiment details before cfelpyutils will read the
    # file
    with open(path, 'r') as f:
        contents = f.read()
    with open(path, 'w') as f:
        f.write('clen = 0.119\n')
        f.write('adu_per_eV = 0.0075\n')
        f.write(contents)

    loaded = AGIPD_1MGeometry.from_crystfel_geom(path)
    np.testing.assert_allclose(loaded.modules[0][0].corner_pos,
                               geom.modules[0][0].corner_pos)
    np.testing.assert_allclose(loaded.modules[0][0].fs_vec,
                               geom.modules[0][0].fs_vec)
Add test of reading & writing CrystFEL geometry
Add test of reading & writing CrystFEL geometry
Python
bsd-3-clause
European-XFEL/h5tools-py
---
+++
@@ -17,3 +17,28 @@
     assert tuple(centre) == (651, 570)
     assert np.isnan(img[0, 0])
     assert img[50, 50] == 0
+
+def test_write_read_crystfel_file(tmpdir):
+    geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
+        (-525, 625),
+        (-550, -10),
+        (520, -160),
+        (542.5, 475),
+    ])
+    path = str(tmpdir / 'test.geom')
+    geom.write_crystfel_geom(path)
+
+    # We need to add some experiment details before cfelpyutils will read the
+    # file
+    with open(path, 'r') as f:
+        contents = f.read()
+    with open(path, 'w') as f:
+        f.write('clen = 0.119\n')
+        f.write('adu_per_eV = 0.0075\n')
+        f.write(contents)
+
+    loaded = AGIPD_1MGeometry.from_crystfel_geom(path)
+    np.testing.assert_allclose(loaded.modules[0][0].corner_pos,
+                               geom.modules[0][0].corner_pos)
+    np.testing.assert_allclose(loaded.modules[0][0].fs_vec,
+                               geom.modules[0][0].fs_vec)
2e6823676dace8b3219aeeef69ab04a2a0dd533a
rally-scenarios/plugins/sample_plugin.py
rally-scenarios/plugins/sample_plugin.py
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Sample plugin for Heat.

For more Heat related benchmarks take a look here:
http://github.com/stackforge/rally/blob/master/rally/benchmark/scenarios/heat/

About plugins: https://rally.readthedocs.org/en/latest/plugins.html

Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts
"""

from rally.benchmark.scenarios import base


class HeatPlugin(base.Scenario):

    @base.scenario(context={"cleanup": ["heat"]})
    def list_benchmark(self, container_format,
                       image_location, disk_format, **kwargs):
        """Get heatclient and do whatever."""
        stacks = list(self.clients("heat").stacks.list())

# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Sample plugin for Heat.

For more Heat related benchmarks take a look here:
http://github.com/openstack/rally/tree/master/rally/plugins/openstack/scenarios/heat

About plugins: https://rally.readthedocs.org/en/latest/plugins.html

Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts
"""

from rally.benchmark.scenarios import base


class HeatPlugin(base.Scenario):

    @base.scenario(context={"cleanup": ["heat"]})
    def list_benchmark(self, container_format,
                       image_location, disk_format, **kwargs):
        """Get heatclient and do whatever."""
        stacks = list(self.clients("heat").stacks.list())
Fix outdated link in sample plugin
Fix outdated link in sample plugin Link in sample_plugin.py is outdated and is changed Change-Id: I2f3a7b59c6380e4584a8ce2a5313fe766a40a52a Closes-Bug: #1491975
Python
apache-2.0
steveb/heat,pratikmallya/heat,openstack/heat,gonzolino/heat,dragorosson/heat,dims/heat,steveb/heat,cwolferh/heat-scratch,openstack/heat,takeshineshiro/heat,dragorosson/heat,noironetworks/heat,noironetworks/heat,cwolferh/heat-scratch,jasondunsmore/heat,jasondunsmore/heat,maestro-hybrid-cloud/heat,dims/heat,maestro-hybrid-cloud/heat,pratikmallya/heat,gonzolino/heat,takeshineshiro/heat
---
+++
@@ -16,7 +16,7 @@
 """Sample plugin for Heat.
 
 For more Heat related benchmarks take a look here:
-http://github.com/stackforge/rally/blob/master/rally/benchmark/scenarios/heat/
+http://github.com/openstack/rally/tree/master/rally/plugins/openstack/scenarios/heat
 
 About plugins: https://rally.readthedocs.org/en/latest/plugins.html
eb22e8931b9ffe9c82b52451a7c17943ea43625d
python-tool/script/project.py
python-tool/script/project.py
$py_copyright

def _main():
    print("Hello from $project_name")

if __name__ == "__main__":
    _main()

#!/usr/bin/env python
$py_copyright
from __future__ import print_function

def _main():
    print("Hello from $project_name")

if __name__ == "__main__":
    _main()
Add shebang and future import
Add shebang and future import
Python
mit
rcook/ptool-templates,rcook/ptool-templates,rcook/ptool-templates,rcook/ptool-templates,rcook/ptool-templates,rcook/ptool-templates,rcook/ptool-templates,rcook/ptool-templates
---
+++
@@ -1,4 +1,6 @@
+#!/usr/bin/env python
 $py_copyright
+from __future__ import print_function
 
 def _main():
     print("Hello from $project_name")
879b15779c921445ca4412d5e63319408d8e32bf
python/islp/02statlearn-ex.py
python/islp/02statlearn-ex.py
import pandas as pd

print('\nKNN\n---')
d = {'X1': [ 0, 2, 0, 0, -1, 1 ],
     'X2': [ 3, 0, 1, 1, 0, 1 ],
     'X3': [ 0, 0, 3, 2, 1, 1 ],
     'Y': ['R', 'R', 'R', 'G', 'G', 'R']}
df = pd.DataFrame(data = d)
df = df.assign(dist = (df.X1**2 + df.X2**2 + df.X3**2)**(0.5))
df = df.sort_values(by='dist')
print(df)
print('K=1 =>', df.head(1).Y.to_numpy()[0])
print('K=3 =>', df.head(3).groupby('Y').count().sort_values(by='dist', # arbitrary
                                                            ascending=False).index.values[0])

print('\nCollege.csv\n-----------')
df = pd.read_csv('College.csv')
print(df)

import matplotlib.pyplot as plt
import pandas as pd

print('\nKNN\n---')
d = {'X1': [ 0, 2, 0, 0, -1, 1 ],
     'X2': [ 3, 0, 1, 1, 0, 1 ],
     'X3': [ 0, 0, 3, 2, 1, 1 ],
     'Y': ['R', 'R', 'R', 'G', 'G', 'R']}
df = pd.DataFrame(data = d)
df = df.assign(dist = (df.X1**2 + df.X2**2 + df.X3**2)**(0.5))
df = df.sort_values(by='dist')
print(df)
print('K=1 =>', df.head(1).Y.to_numpy()[0])
print('K=3 =>', df.head(3).groupby('Y').count().sort_values(by='dist', # arbitrary
                                                            ascending=False).index.values[0])

print('\nCollege.csv\n-----------')
df = pd.read_csv('College.csv')
df.rename(columns={'Unnamed: 0': 'Name'}, inplace=True)
df.set_index('Name', inplace=True)
print(df.describe())
fig = plt.figure()
gs = fig.add_gridspec(10, 10)
for r in range(10):
    for c in range(10):
        axes = fig.add_subplot(gs[r, c])
        axes.xaxis.set_visible(False)
        axes.yaxis.set_visible(False)
        if r == c:
            axes.annotate(df.columns.values[r], (0.5, 0.5),
                          xycoords='axes fraction', ha='center', va='center')
        else:
            df.plot.scatter(x=r, y=c, ax=axes)
plt.show()
Add scatterplot matrix for college.csv.
Add scatterplot matrix for college.csv.
Python
apache-2.0
pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff,pdbartlett/misc-stuff
---
+++
@@ -1,3 +1,4 @@
+import matplotlib.pyplot as plt
 import pandas as pd
 
 print('\nKNN\n---')
@@ -15,4 +16,19 @@
 
 print('\nCollege.csv\n-----------')
 df = pd.read_csv('College.csv')
-print(df)
+df.rename(columns={'Unnamed: 0': 'Name'}, inplace=True)
+df.set_index('Name', inplace=True)
+print(df.describe())
+fig = plt.figure()
+gs = fig.add_gridspec(10, 10)
+for r in range(10):
+    for c in range(10):
+        axes = fig.add_subplot(gs[r, c])
+        axes.xaxis.set_visible(False)
+        axes.yaxis.set_visible(False)
+        if r == c:
+            axes.annotate(df.columns.values[r], (0.5, 0.5),
+                          xycoords='axes fraction', ha='center', va='center')
+        else:
+            df.plot.scatter(x=r, y=c, ax=axes)
+plt.show()
35514dcb70ed5ede39299802e82fa352188f3546
examples/nested_inline_tasksets.py
examples/nested_inline_tasksets.py
from locust import HttpUser, TaskSet, task, between


class WebsiteUser(HttpUser):
    """
    Example of the ability of inline nested TaskSet classes
    """
    host = "http://127.0.0.1:8089"
    wait_time = between(2, 5)

    class TopLevelTaskSet(TaskSet):
        @task
        class IndexTaskSet(TaskSet):
            @task(10)
            def index(self):
                self.client.get("/")

            @task(1)
            def stop(self):
                self.interrupt()

        @task
        def stats(self):
            self.client.get("/stats/requests")

    tasks = [TopLevelTaskSet]

from locust import HttpUser, TaskSet, task, between


class WebsiteUser(HttpUser):
    """
    Example of the ability of inline nested TaskSet classes
    """
    host = "http://127.0.0.1:8089"
    wait_time = between(2, 5)

    @task
    class TopLevelTaskSet(TaskSet):
        @task
        class IndexTaskSet(TaskSet):
            @task(10)
            def index(self):
                self.client.get("/")

            @task(1)
            def stop(self):
                self.interrupt()

        @task
        def stats(self):
            self.client.get("/stats/requests")
Use @task decorator in taskset example
Use @task decorator in taskset example
Python
mit
mbeacom/locust,locustio/locust,mbeacom/locust,locustio/locust,mbeacom/locust,mbeacom/locust,locustio/locust,locustio/locust
---
+++
@@ -7,7 +7,8 @@
     """
     host = "http://127.0.0.1:8089"
     wait_time = between(2, 5)
-    
+
+    @task
     class TopLevelTaskSet(TaskSet):
         @task
         class IndexTaskSet(TaskSet):
@@ -22,5 +23,3 @@
         @task
         def stats(self):
             self.client.get("/stats/requests")
-
-    tasks = [TopLevelTaskSet]
8af483f6bfd0576f694b6693deacbf25a782fe5a
pyfire/logger.py
pyfire/logger.py
# -*- coding: utf-8 -*- """ pyfire.logger ~~~~~~~~~~~~~ Use pocoo's logbook or a simple no-op fallback :copyright: (c) 2011 by the pyfire Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ try: from logbook import Logger except ImportError: class Logger(object): def __init__(self, name, level=0): self.name = name self.level = level debug = info = warn = warning = notice = error = exception = \ critical = log = lambda *a, **kw: None
# -*- coding: utf-8 -*- """ pyfire.logger ~~~~~~~~~~~~~ Use pocoo's logbook or a simple no-op fallback :copyright: (c) 2011 by the pyfire Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import warnings import pyfire.configuration as config try: import logbook class Logger(logbook.Logger): def __init__(self, name): try: level = config.get('logging', name.replace('.','_')).upper() except config.NoOptionError: level = '' if not level: level = config.get('logging', 'global_level').upper() if level not in frozenset(['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']): warnings.warn("No such loglevel %s" % level, RuntimeWarning) level = 'ERROR' super(Logger, self).__init__(name, getattr(logbook, level)) except ImportError: class Logger(object): def __init__(self, name, level=0): self.name = name self.level = level debug = info = warn = warning = notice = error = exception = \ critical = log = lambda *a, **kw: None
Implement configuration support in logging
Implement configuration support in logging
Python
bsd-3-clause
IgnitedAndExploded/pyfire,IgnitedAndExploded/pyfire
---
+++
@@ -9,8 +9,28 @@
     :license: BSD, see LICENSE for more details.
 """
 
+import warnings
+import pyfire.configuration as config
+
+
 try:
-    from logbook import Logger
+    import logbook
+    class Logger(logbook.Logger):
+        def __init__(self, name):
+            try:
+                level = config.get('logging', name.replace('.','_')).upper()
+            except config.NoOptionError:
+                level = ''
+
+            if not level:
+                level = config.get('logging', 'global_level').upper()
+
+            if level not in frozenset(['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']):
+                warnings.warn("No such loglevel %s" % level, RuntimeWarning)
+                level = 'ERROR'
+
+            super(Logger, self).__init__(name, getattr(logbook, level))
+
 except ImportError:
     class Logger(object):
         def __init__(self, name, level=0):
1a1aab7c0a107b2a02d08f13c40c515ff265b60b
tempwatcher/watch.py
tempwatcher/watch.py
import json
import requests


class TemperatureWatch(object):
    thermostat_url = None
    alert_high = 80
    alert_low = 60
    _last_response = None

    def get_info(self):
        r = requests.get(self.thermostat_url + '/tstat')
        self._last_response = json.loads(r.text)
        return r.text

    def check_temp(self):
        if not self._last_response:
            self.get_info()

        if self._last_response['temp'] > self.alert_high:
            print('Temperature max of %s exceeded. Currently %s' % (self.alert_high, self._last_response['temp']))

        if self._last_response['temp'] < self.alert_low:
            print('Temperature min of %s exceeded. Currently %s' % (self.alert_low, self._last_response['temp']))


if __name__ == '__main__':
    tw = TemperatureWatch()
    tw.thermostat_url = 'http://10.0.1.52'
    tw.check_temp()

import json
import requests


class TemperatureWatch(object):
    thermostat_url = None
    alert_high = 80
    alert_low = 60
    _last_response = None

    def get_info(self):
        r = requests.get(self.thermostat_url + '/tstat')
        self._last_response = json.loads(r.text)
        return r.text

    def check_temp(self):
        if not self._last_response:
            self.get_info()

        if self._last_response['temp'] > self.alert_high:
            self.alert('Temperature max of %s exceeded. Currently %s' % (self.alert_high, self._last_response['temp']))

        if self._last_response['temp'] < self.alert_low:
            self.alert('Temperature min of %s exceeded. Currently %s' % (self.alert_low, self._last_response['temp']))

    def alert(self, message):
        print(message)


if __name__ == '__main__':
    tw = TemperatureWatch()
    tw.thermostat_url = 'http://10.0.1.52'
    tw.check_temp()
Break the alert into a method so subclasses can choose how the alert is relayed.
Break the alert into a method so subclasses can choose how the alert is relayed.
Python
bsd-3-clause
adamfast/tempwatcher
---
+++
@@ -18,10 +18,13 @@
             self.get_info()
 
         if self._last_response['temp'] > self.alert_high:
-            print('Temperature max of %s exceeded. Currently %s' % (self.alert_high, self._last_response['temp']))
+            self.alert('Temperature max of %s exceeded. Currently %s' % (self.alert_high, self._last_response['temp']))
 
         if self._last_response['temp'] < self.alert_low:
-            print('Temperature min of %s exceeded. Currently %s' % (self.alert_low, self._last_response['temp']))
+            self.alert('Temperature min of %s exceeded. Currently %s' % (self.alert_low, self._last_response['temp']))
+
+    def alert(self, message):
+        print(message)
 
 
 if __name__ == '__main__':
616d33e1c69af979163231ef8e9ca9a1f6c7bdf2
pyts/__init__.py
pyts/__init__.py
"""Time series transformation and classification module for Python. pyts is a Python package for time series transformation and classification. It aims to make time series classification easily accessible by providing preprocessing and utility tools, and implementations of state-of-the-art algorithms. Most of these algorithms transform time series, thus pyts provides several tools to perform these transformations. """ __version__ = '0.8.0' __all__ = ['approximation', 'bag_of_words', 'classification', 'decomposition', 'image', 'metrics', 'preprocessing', 'transformation', 'utils']
"""Time series transformation and classification module for Python. pyts is a Python package for time series transformation and classification. It aims to make time series classification easily accessible by providing preprocessing and utility tools, and implementations of state-of-the-art algorithms. Most of these algorithms transform time series, thus pyts provides several tools to perform these transformations. """ __version__ = '0.9.dev0' __all__ = ['approximation', 'bag_of_words', 'classification', 'decomposition', 'image', 'metrics', 'preprocessing', 'transformation', 'utils']
Update the version on master
Update the version on master
Python
bsd-3-clause
johannfaouzi/pyts
---
+++
@@ -7,7 +7,7 @@
 several tools to perform these transformations.
 """
 
-__version__ = '0.8.0'
+__version__ = '0.9.dev0'
 
 __all__ = ['approximation', 'bag_of_words', 'classification', 'decomposition',
            'image', 'metrics', 'preprocessing', 'transformation', 'utils']
fff22f9305fd73f464a6ee9a66b676b66ef88cad
test/parseResults.py
test/parseResults.py
#!/usr/bin/env python3

import json
import sys

PREFIXES = [
    ["FAIL", "PASS"],
    ["EXPECTED FAIL", "UNEXPECTED PASS"],
]

def parse_expected_failures():
    expected_failures = set()
    with open("expected-failures.txt", "r") as fp:
        for line in fp:
            line = line.strip()
            if not line:
                continue
            if line.startswith("#"):
                continue
            expected_failures.add(line)
    return expected_failures

def main(filename):
    expected_failures = parse_expected_failures()
    with open(filename, "r") as fp:
        results = json.load(fp)

    unexpected_results = []
    for test in results:
        expected_failure = test["file"] in expected_failures
        actual_result = test["result"]["pass"]
        print("{} {} ({})".format(PREFIXES[expected_failure][actual_result], test["file"], test["scenario"]))
        if actual_result == expected_failure:
            if not actual_result:
                print(test["rawResult"]["stderr"])
                print(test["rawResult"]["stdout"])
                print(test["result"]["message"])
            unexpected_results.append(test)

    if unexpected_results:
        print("{} unexpected results:".format(len(unexpected_results)))
        for unexpected in unexpected_results:
            print("- {}".format(unexpected["file"]))
        return False

    print("All results as expected.")
    return True

if __name__ == "__main__":
    sys.exit(0 if main(sys.argv[1]) else 1)

#!/usr/bin/env python3

import ijson
import sys

PREFIXES = [
    ["FAIL", "PASS"],
    ["EXPECTED FAIL", "UNEXPECTED PASS"],
]

def parse_expected_failures():
    expected_failures = set()
    with open("expected-failures.txt", "r") as fp:
        for line in fp:
            line = line.strip()
            if not line:
                continue
            if line.startswith("#"):
                continue
            expected_failures.add(line)
    return expected_failures

def main(filename):
    expected_failures = parse_expected_failures()
    with open(filename, "r") as fp:
        results = ijson.items(fp, "item")

        unexpected_results = []
        for test in results:
            expected_failure = test["file"] in expected_failures
            actual_result = test["result"]["pass"]
            print("{} {} ({})".format(PREFIXES[expected_failure][actual_result], test["file"], test["scenario"]))
            if actual_result == expected_failure:
                if not actual_result:
                    print(test["rawResult"]["stderr"])
                    print(test["rawResult"]["stdout"])
                    print(test["result"]["message"])
                unexpected_results.append(test)

    if unexpected_results:
        print("{} unexpected results:".format(len(unexpected_results)))
        for unexpected in unexpected_results:
            print("- {}".format(unexpected["file"]))
        return False

    print("All results as expected.")
    return True

if __name__ == "__main__":
    sys.exit(0 if main(sys.argv[1]) else 1)
Use ijson to parse the test262 results.
Use ijson to parse the test262 results. Parsing the output all at once can cause out-of-memory errors in automated testing.
Python
isc
js-temporal/temporal-polyfill,js-temporal/temporal-polyfill,js-temporal/temporal-polyfill
---
+++
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-import json
+import ijson
 import sys
 
 PREFIXES = [
@@ -25,19 +25,19 @@
 def main(filename):
     expected_failures = parse_expected_failures()
     with open(filename, "r") as fp:
-        results = json.load(fp)
+        results = ijson.items(fp, "item")
 
-    unexpected_results = []
-    for test in results:
-        expected_failure = test["file"] in expected_failures
-        actual_result = test["result"]["pass"]
-        print("{} {} ({})".format(PREFIXES[expected_failure][actual_result], test["file"], test["scenario"]))
-        if actual_result == expected_failure:
-            if not actual_result:
-                print(test["rawResult"]["stderr"])
-                print(test["rawResult"]["stdout"])
-                print(test["result"]["message"])
-            unexpected_results.append(test)
+        unexpected_results = []
+        for test in results:
+            expected_failure = test["file"] in expected_failures
+            actual_result = test["result"]["pass"]
+            print("{} {} ({})".format(PREFIXES[expected_failure][actual_result], test["file"], test["scenario"]))
+            if actual_result == expected_failure:
+                if not actual_result:
+                    print(test["rawResult"]["stderr"])
+                    print(test["rawResult"]["stdout"])
+                    print(test["result"]["message"])
+                unexpected_results.append(test)
 
     if unexpected_results:
         print("{} unexpected results:".format(len(unexpected_results)))
695ee95faf0ae80f0c69bf47e881af22ab0f00cd
l10n_it_esigibilita_iva/models/account.py
l10n_it_esigibilita_iva/models/account.py
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).

from odoo import models, fields

class AccountTax(models.Model):
    _inherit = 'account.tax'

    payability = fields.Selection([
        ('I', 'Immediate payability'),
        ('D', 'Deferred payability'),
        ('S', 'Split payment'),
    ], string="VAT payability")

# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).

from odoo import models, fields

class AccountTax(models.Model):
    _inherit = 'account.tax'

    payability = fields.Selection([
        ('I', 'VAT payable immediately'),
        ('D', 'unrealized VAT'),
        ('S', 'split payments'),
    ], string="VAT payability")
Use correct english terms, from APPENDIX A -TECHNICAL SPECIFICATIONS
Use correct english terms, from APPENDIX A -TECHNICAL SPECIFICATIONS
Python
agpl-3.0
dcorio/l10n-italy,OCA/l10n-italy,OCA/l10n-italy,dcorio/l10n-italy,dcorio/l10n-italy,OCA/l10n-italy
---
+++
@@ -6,7 +6,7 @@
     _inherit = 'account.tax'
 
     payability = fields.Selection([
-        ('I', 'Immediate payability'),
-        ('D', 'Deferred payability'),
-        ('S', 'Split payment'),
+        ('I', 'VAT payable immediately'),
+        ('D', 'unrealized VAT'),
+        ('S', 'split payments'),
     ], string="VAT payability")
628ab56107783841d1e64b11c4b82eac4806c019
selenium_testcase/testcases/content.py
selenium_testcase/testcases/content.py
# -*- coding: utf-8 -*-

from __future__ import absolute_import

from .utils import dom_contains, wait_for


class ContentTestMixin:

    # Assert that the DOM contains the given text
    def should_see_immediately(self, text):
        self.assertTrue(dom_contains(self.browser, text))

    # Repeatedly look for the given text until it appears (or we give up)
    @wait_for
    def should_see(self, text):
        """ Wait for text to appear before raising assertion. """
        return self.should_see_immediately(text)

    @wait_for
    def has_title(self, title):
        """ Assert when page title does not match. """
        self.assertEqual(self.browser.title, title)

# -*- coding: utf-8 -*-

from __future__ import absolute_import

from .utils import dom_contains, wait_for


class ContentTestMixin:

    def should_see_immediately(self, text):
        """ Assert that DOM contains the given text. """
        self.assertTrue(dom_contains(self.browser, text))

    @wait_for
    def should_see(self, text):
        """ Wait for text to appear before testing assertion. """
        return self.should_see_immediately(text)

    def should_not_see(self, text):
        """ Wait for text to not appear before testing assertion. """
        self.assertRaises(AssertionError, self.should_see, text)

    @wait_for
    def has_title(self, title):
        """ Assert that page title matches. """
        self.assertEqual(self.browser.title, title)

    def has_not_title(self, title):
        """ Assert when page title does not match. """
        self.assertRaises(AssertionError, self.has_title, title)

    @wait_for
    def title_contains(self, text):
        """ Assert that page title contains text. """
        self.assertIn(text, self.browser.title)

    def title_does_not_contain(self, text):
        """ Assert that page title does not contain text. """
        self.assertRaises(AssertionError, self.title_contains, text)
Add should_not_see, has_not_title, title_does_not_contain to ContentTestMixin.
Add should_not_see, has_not_title, title_does_not_contain to ContentTestMixin. These methods cause a wait for the full duration of the @wait_for timeout when the assertion test is successful (and the given text is missing). It will fail fast, but success is slow. These negative information tests should be used sparingly.
Python
bsd-3-clause
nimbis/django-selenium-testcase,nimbis/django-selenium-testcase
---
+++
@@ -7,17 +7,33 @@
 
 class ContentTestMixin:
 
-    # Assert that the DOM contains the given text
     def should_see_immediately(self, text):
+        """ Assert that DOM contains the given text. """
        self.assertTrue(dom_contains(self.browser, text))
 
-    # Repeatedly look for the given text until it appears (or we give up)
     @wait_for
     def should_see(self, text):
-        """ Wait for text to appear before raising assertion. """
+        """ Wait for text to appear before testing assertion. """
         return self.should_see_immediately(text)
+
+    def should_not_see(self, text):
+        """ Wait for text to not appear before testing assertion. """
+        self.assertRaises(AssertionError, self.should_see, text)
 
     @wait_for
     def has_title(self, title):
+        """ Assert that page title matches. """
+        self.assertEqual(self.browser.title, title)
+
+    def has_not_title(self, title):
         """ Assert when page title does not match. """
-        self.assertEqual(self.browser.title, title)
+        self.assertRaises(AssertionError, self.has_title, title)
+
+    @wait_for
+    def title_contains(self, text):
+        """ Assert that page title contains text. """
+        self.assertIn(text, self.browser.title)
+
+    def title_does_not_contain(self, text):
+        """ Assert that page title does not contain text. """
+        self.assertRaises(AssertionError, self.title_contains, text)
e23dd39880dc849a56d5376dca318f8bcb2cd998
discover/__init__.py
discover/__init__.py
import logging
import socket
import boto

LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s %(message)s'
LOG_DATE = '%Y-%m-%d %I:%M:%S %p'
logging.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATE, level=logging.WARN)
logger = logging.getLogger('yoda-discover')
logger.level = logging.INFO


def port_test(port, host, protocol='tcp'):
    if isinstance(port, str):
        port = int(port)
    sock_type = socket.SOCK_DGRAM if protocol == 'udp' else socket.SOCK_STREAM
    sock = socket.socket(socket.AF_INET, sock_type)
    sock.settimeout(2000)
    try:
        sock.connect((host, port))
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        return True
    except socket.error as error:
        logger.warn('Port test failed for host: %s port: %s. Reason: %s',
                    host, port, error)
        return False


def map_proxy_host(proxy_host):
    proxy_host = proxy_host.lower()
    if proxy_host.startswith('ec2:meta-data:'):
        meta_data = proxy_host.replace('ec2:meta-data:', '')
        return boto.utils.get_instance_metadata()[meta_data]
    return proxy_host

import logging
import socket
from boto.utils import get_instance_metadata


LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s %(message)s'
LOG_DATE = '%Y-%m-%d %I:%M:%S %p'
logging.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATE, level=logging.WARN)
logger = logging.getLogger('yoda-discover')
logger.level = logging.INFO


def port_test(port, host, protocol='tcp'):
    if isinstance(port, str):
        port = int(port)
    sock_type = socket.SOCK_DGRAM if protocol == 'udp' else socket.SOCK_STREAM
    sock = socket.socket(socket.AF_INET, sock_type)
    sock.settimeout(2000)
    try:
        sock.connect((host, port))
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        return True
    except socket.error as error:
        logger.warn('Port test failed for host: %s port: %s. Reason: %s',
                    host, port, error)
        return False


def map_proxy_host(proxy_host):
    proxy_host = proxy_host.lower()
    if proxy_host.startswith('ec2:meta-data:'):
        meta_data = proxy_host.replace('ec2:meta-data:', '')
        return get_instance_metadata()[meta_data]
    return proxy_host
Fix import error for boto.utils
Fix import error for boto.utils
Python
mit
totem/yoda-discover
---
+++
@@ -1,6 +1,7 @@
 import logging
 import socket
-import boto
+from boto.utils import get_instance_metadata
+
 
 LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s %(message)s'
 LOG_DATE = '%Y-%m-%d %I:%M:%S %p'
@@ -32,5 +33,5 @@
     proxy_host = proxy_host.lower()
     if proxy_host.startswith('ec2:meta-data:'):
         meta_data = proxy_host.replace('ec2:meta-data:', '')
-        return boto.utils.get_instance_metadata()[meta_data]
+        return get_instance_metadata()[meta_data]
     return proxy_host
12aaf389356966e7f82c4a588e0ae888073da8dd
discussion/models.py
discussion/models.py
from django.contrib.auth.models import User
from django.db import models


class Discussion(models.Model):
    user = models.ForeignKey(User)
    name = models.CharField(max_length=255)
    slug = models.SlugField()

    def __unicode__(self):
        return self.name


class Post(models.Model):
    discussion = models.ForeignKey(Discussion)
    user = models.ForeignKey(User)
    name = models.CharField(max_length=255)
    slug = models.SlugField()
    body = models.TextField()
    posts_file = models.FileField(upload_to='uploads/posts',
                                  blank=True, null=True)

    def __unicode__(self):
        return self.name


class Comment(models.Model):
    post = models.ForeignKey(Post)
    user = models.ForeignKey(User)
    body = models.TextField()
    comment_file = models.FileField(upload_to='uploads/comments',
                                    blank=True, null=True)

    def __unicode__(self):
        return 'Comment on %s by %s' % (self.post.name, self.user)

from django.contrib.auth.models import User
from django.db import models


class Discussion(models.Model):
    user = models.ForeignKey(User)
    name = models.CharField(max_length=255)
    slug = models.SlugField()

    def __unicode__(self):
        return self.name


class Post(models.Model):
    discussion = models.ForeignKey(Discussion)
    user = models.ForeignKey(User)
    slug = models.SlugField()
    body = models.TextField()
    posts_file = models.FileField(upload_to='uploads/posts',
                                  blank=True, null=True)

    def __unicode__(self):
        return self.name


class Comment(models.Model):
    post = models.ForeignKey(Post)
    user = models.ForeignKey(User)
    body = models.TextField()
    comment_file = models.FileField(upload_to='uploads/comments',
                                    blank=True, null=True)

    def __unicode__(self):
        return 'Comment on %s by %s' % (self.post.name, self.user)
Remove "name" from discussion post
Remove "name" from discussion post
Python
bsd-2-clause
incuna/django-discussion,incuna/django-discussion,lehins/lehins-discussion,lehins/lehins-discussion,lehins/lehins-discussion
---
+++
@@ -14,7 +14,6 @@
 class Post(models.Model):
     discussion = models.ForeignKey(Discussion)
     user = models.ForeignKey(User)
-    name = models.CharField(max_length=255)
     slug = models.SlugField()
     body = models.TextField()
     posts_file = models.FileField(upload_to='uploads/posts',
e59f8e72996b036b7c2df8f6b1054f82d730bdf8
tinycontent/admin.py
tinycontent/admin.py
from django.contrib import admin

from tinycontent.models import TinyContent, TinyContentFileUpload


class TinyContentAdmin(admin.ModelAdmin):
    list_display = ('name', )
    search_fields = ('name', 'content', )

admin.site.register(TinyContent, TinyContentAdmin)


class TinyContentFileUploadAdmin(admin.ModelAdmin):
    list_display = ('name', )
    search_fields = ('name', )

admin.site.register(TinyContentFileUpload, TinyContentFileUploadAdmin)

from django.contrib import admin

from tinycontent.models import TinyContent, TinyContentFileUpload


class TinyContentAdmin(admin.ModelAdmin):
    list_display = ('name', )
    search_fields = ('name', 'content', )

admin.site.register(TinyContent, TinyContentAdmin)


class TinyContentFileUploadAdmin(admin.ModelAdmin):
    list_display = ('name', 'slug', )
    search_fields = ('name', )

admin.site.register(TinyContentFileUpload, TinyContentFileUploadAdmin)
Make it easier to figure out a given file's slug
Make it easier to figure out a given file's slug
Python
bsd-3-clause
ad-m/django-tinycontent,dominicrodger/django-tinycontent,watchdogpolska/django-tinycontent,dominicrodger/django-tinycontent,ad-m/django-tinycontent,watchdogpolska/django-tinycontent
---
+++
@@ -10,7 +10,7 @@
 
 
 class TinyContentFileUploadAdmin(admin.ModelAdmin):
-    list_display = ('name', )
+    list_display = ('name', 'slug', )
     search_fields = ('name', )
 
 admin.site.register(TinyContentFileUpload, TinyContentFileUploadAdmin)
a5d7306cdda9e109abbb673b8474f8955a371266
partner_contact_birthdate/__openerp__.py
partner_contact_birthdate/__openerp__.py
# -*- coding: utf-8 -*-

# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{
    "name": "Contact's birthdate",
    "version": "9.0.1.0.0",
    "author": "Odoo Community Association (OCA)",
    "category": "Customer Relationship Management",
    "website": "https://odoo-community.org/",
    "depends": [
        "base",
    ],
    "data": [
        "views/res_partner.xml",
    ],
    'installable': True,
}

# -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

{
    "name": "Contact's birthdate",
    "version": "9.0.1.0.0",
    "author": "Odoo Community Association (OCA)",
    "category": "Customer Relationship Management",
    "website": "https://odoo-community.org/",
    "depends": [
        "base",
    ],
    "data": [
        "views/res_partner.xml",
    ],
    'installable': True,
}
Fix header to short version
Fix header to short version
Python
agpl-3.0
sergiocorato/partner-contact
---
+++
@@ -1,20 +1,6 @@
 # -*- coding: utf-8 -*-
-
-# Odoo, Open Source Management Solution
-# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# © <YEAR(S)> <AUTHOR(S)>
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
 
 {
     "name": "Contact's birthdate",
a3c68f6f70a2d4d1ecdcdb982eda9ec15fa4c127
utils.py
utils.py
from google.appengine.api import users
from google.appengine.ext import db

from model import User

@db.transactional
def create_user(google_user):
    user = User(
        google_user=google_user
    )
    user.put()
    return user

def get_current_user():
    google_user = users.get_current_user()
    user = get_user_model_for(google_user)
    return user

def get_user_model_for(google_user=None):
    return User.all().filter('google_user =', google_user).get()

def get_user_model_by_id_or_nick(id_or_nick):
    if id_or_nick.isdigit():
        return User.get_by_id(int(id_or_nick))
    else:
        return User.all().filter('nickname_lower = ', id_or_nick.lower()).get()

from google.appengine.api import users
from google.appengine.ext import db

from model import User


latest_signup = None

@db.transactional
def create_user(google_user):
    global latest_signup
    user = User(
        google_user=google_user
    )
    user.put()
    latest_signup = user
    return user

def get_current_user():
    google_user = users.get_current_user()

    if latest_signup != None and google_user == latest_signup.google_user:
        return latest_signup

    user = get_user_model_for(google_user)
    return user

def get_user_model_for(google_user=None):
    return User.all().filter('google_user =', google_user).get()

def get_user_model_by_id_or_nick(id_or_nick):
    if id_or_nick.isdigit():
        return User.get_by_id(int(id_or_nick))
    else:
        return User.all().filter('nickname_lower = ', id_or_nick.lower()).get()
Fix bug where user could not be found
Fix bug where user could not be found This problem only occured when a request tried to find the user right after it had been created.
Python
mit
youtify/newscontrol,studyindenmark/newscontrol,studyindenmark/newscontrol,youtify/newscontrol
---
+++
@@ -3,16 +3,24 @@
 
 from model import User
 
+
+latest_signup = None
+
 @db.transactional
 def create_user(google_user):
+    global latest_signup
     user = User(
         google_user=google_user
     )
     user.put()
+    latest_signup = user
     return user
 
 def get_current_user():
     google_user = users.get_current_user()
+
+    if latest_signup != None and google_user == latest_signup.google_user:
+        return latest_signup
+
     user = get_user_model_for(google_user)
     return user
9f8b555698471987a5a635f69fa0ad68a4b28134
14/src.py
14/src.py
import sys
import itertools
import re
from md5 import md5

puzzle_input = 'abc' # 'yjdafjpo'

def key(n):
    return md5(puzzle_input + str(n)).hexdigest()

def otp_keys(horizon):
    lookahead = {k: -1 for k in '0123456789abcdef'}
    def update_lookahead(n):
        for quint in re.finditer(r'(.)\1{4}', key(n)):
            lookahead[quint.group(1)] = n
    for i in xrange(1, horizon):
        update_lookahead(i)
    for i in itertools.count():
        update_lookahead(i + horizon)
        triple = re.search(r'(.)\1{2}', key(i))
        if triple:
            if lookahead[triple.group(1)] >= i:
                yield i

if __name__ == '__main__':
    keys = otp_keys(1000)
    for ret in enumerate(keys):
        print ret

import sys
import itertools
import re
from md5 import md5

puzzle_input = 'yjdafjpo'

def key(n):
    return md5(puzzle_input + str(n)).hexdigest()

def otp_keys(horizon):
    lookahead = {k: -1 for k in '0123456789abcdef'}
    def update_lookahead(n):
        for quint in re.finditer(r'(.)\1{4}', key(n)):
            lookahead[quint.group(1)] = n
    for i in xrange(1, horizon):
        update_lookahead(i)
    for i in itertools.count():
        update_lookahead(i + horizon)
        triple = re.search(r'(.)\1{2}', key(i))
        if triple:
            if lookahead[triple.group(1)] > i:
                yield i

if __name__ == '__main__':
    keys = otp_keys(1000)
    for ret in enumerate(keys):
        print ret
Fix problem in previous version
Fix problem in previous version I was accidentally allowing the current hash's quint to match its triple
Python
mit
amalloy/advent-of-code-2016
---
+++
@@ -3,7 +3,7 @@
 import re
 from md5 import md5
 
-puzzle_input = 'abc' # 'yjdafjpo'
+puzzle_input = 'yjdafjpo'
 
 def key(n):
     return md5(puzzle_input + str(n)).hexdigest()
@@ -20,7 +20,7 @@
         update_lookahead(i + horizon)
         triple = re.search(r'(.)\1{2}', key(i))
         if triple:
-            if lookahead[triple.group(1)] >= i:
+            if lookahead[triple.group(1)] > i:
                 yield i
 
 if __name__ == '__main__':
2cdb6a5eeb1730627cea2a812d590efed82d03fb
acceptance_tests/test_course_learners.py
acceptance_tests/test_course_learners.py
from unittest import skipUnless

from bok_choy.web_app_test import WebAppTest

from acceptance_tests import ENABLE_LEARNER_ANALYTICS
from acceptance_tests.mixins import CoursePageTestsMixin
from acceptance_tests.pages import CourseLearnersPage


@skipUnless(ENABLE_LEARNER_ANALYTICS, 'Learner Analytics must be enabled to run CourseLearnersTests')
class CourseLearnersTests(CoursePageTestsMixin, WebAppTest):
    def setUp(self):
        super(CourseLearnersTests, self).setUp()
        self.page = CourseLearnersPage(self.browser)

    def _test_data_update_message(self):
        # Don't test the update message for now, since it won't exist
        # until the SPA adds it to the page in AN-6205.
        pass

    def _get_data_update_message(self):
        # Don't test the update message for now, since it won't exist
        # until the SPA adds it to the page in AN-6205.
        return ''

from unittest import skipUnless

from bok_choy.web_app_test import WebAppTest

from acceptance_tests import ENABLE_LEARNER_ANALYTICS
from acceptance_tests.mixins import CoursePageTestsMixin
from acceptance_tests.pages import CourseLearnersPage


@skipUnless(ENABLE_LEARNER_ANALYTICS, 'Learner Analytics must be enabled to run CourseLearnersTests')
class CourseLearnersTests(CoursePageTestsMixin, WebAppTest):
    help_path = 'engagement/learners.html'

    def setUp(self):
        super(CourseLearnersTests, self).setUp()
        self.page = CourseLearnersPage(self.browser)

    def _test_data_update_message(self):
        # Don't test the update message for now, since it won't exist
        # until the SPA adds it to the page in AN-6205.
        pass

    def _get_data_update_message(self):
        # Don't test the update message for now, since it won't exist
        # until the SPA adds it to the page in AN-6205.
        return ''
Add test for learners help link
Add test for learners help link
Python
agpl-3.0
Stanford-Online/edx-analytics-dashboard,Stanford-Online/edx-analytics-dashboard,edx/edx-analytics-dashboard,Stanford-Online/edx-analytics-dashboard,edx/edx-analytics-dashboard,edx/edx-analytics-dashboard,Stanford-Online/edx-analytics-dashboard,edx/edx-analytics-dashboard
---
+++
@@ -9,6 +9,8 @@
 
 @skipUnless(ENABLE_LEARNER_ANALYTICS, 'Learner Analytics must be enabled to run CourseLearnersTests')
 class CourseLearnersTests(CoursePageTestsMixin, WebAppTest):
+    help_path = 'engagement/learners.html'
+
     def setUp(self):
         super(CourseLearnersTests, self).setUp()
         self.page = CourseLearnersPage(self.browser)
743064dbe22e40928c50817417077b8d52de641c
twistedchecker/functionaltests/comments.py
twistedchecker/functionaltests/comments.py
# enable: W9401,W9402

#A comment does not begin with a whitespace.

a = 1 + 2 #  A comment begins with two whitespace.

# a comment begins with a lowercase letter.

# Good comment examples.

# A sentence that spans multiple lines
# doesn't need to have capitalization on second line.

# Here's some code samples:
#     x = x + 1

# Make sure no error occur when checking an empty comment
#

# '\r\n\t' a comment can start with a new lines characters.

var = 1 + 2 # \r\n same for inline comments.

# enable: W9401,W9402

#A comment does not begin with a whitespace.

a = 1 + 2 #  A comment begins with two whitespace.

# a comment begins with a lowercase letter.

# Good comment examples.

# A sentence that spans multiple lines
# doesn't need to have capitalization on second line.

# Here's some code samples:
#     x = x + 1

# Make sure no error occur when checking an empty comment
#

# '\r\n\t' a comment can start with a new lines characters.

var = 1 + 2 # \r\n same for inline comments.

# `literal` is fine at the start.
Add example with back ticks.
Add example with back ticks.
Python
mit
twisted/twistedchecker
---
+++
@@ -20,3 +20,5 @@
 # '\r\n\t' a comment can start with a new lines characters.
 
 var = 1 + 2 # \r\n same for inline comments.
+
+# `literal` is fine at the start.
aabf28c02a4dff593e5e4b156052adb9b81a70c7
skflow/ops/tests/test_dropout_ops.py
skflow/ops/tests/test_dropout_ops.py
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import tensorflow as tf

from skflow import ops


class DropoutTest(tf.test.TestCase):

    def test_dropout_float(self):
        with self.test_session():
            x = tf.placeholder(tf.float32, [5, 5])
            y = ops.dropout(x, 0.5)
            probs = tf.get_collection(ops.DROPOUTS)
            self.assertEqual(len(probs), 1)


if __name__ == '__main__':
    tf.test.main()

# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import tensorflow as tf

from skflow import ops


class DropoutTest(tf.test.TestCase):

    def test_dropout_float(self):
        with self.test_session() as session:
            x = tf.placeholder(tf.float32, [5, 5])
            y = ops.dropout(x, 0.5)
            probs = tf.get_collection(ops.DROPOUTS)
            session.run(tf.initialize_all_variables())
            self.assertEqual(len(probs), 1)
            self.assertEqual(session.run(probs[0]), 0.5)

    def test_dropout_tensor(self):
        with self.test_session():
            x = tf.placeholder(tf.float32, [5, 5])
            y = tf.get_variable("prob", [], initializer=tf.constant_initializer(0.5))
            z = ops.dropout(x, y)
            probs = tf.get_collection(ops.DROPOUTS)
            self.assertEqual(probs, [y])


if __name__ == '__main__':
    tf.test.main()
Test for dropout probability be a tensor
Test for dropout probability be a tensor
Python
apache-2.0
handroissuazo/tensorflow,elingg/tensorflow,awni/tensorflow,aselle/tensorflow,theflofly/tensorflow,ishay2b/tensorflow,XueqingLin/tensorflow,DavidNorman/tensorflow,odejesush/tensorflow,taknevski/tensorflow-xsmm,AndreasMadsen/tensorflow,Kongsea/tensorflow,hfp/tensorflow-xsmm,jhaux/tensorflow,JingJunYin/tensorflow,tensorflow/tensorflow,mixturemodel-flow/tensorflow,aam-at/tensorflow,ibab/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Carmezim/tensorflow,yongtang/tensorflow,ageron/tensorflow,mrry/tensorflow,HKUST-SING/tensorflow,av8ramit/tensorflow,whn09/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,ppwwyyxx/tensorflow,anilmuthineni/tensorflow,laosiaudi/tensorflow,gnieboer/tensorflow,cxxgtxy/tensorflow,gautam1858/tensorflow,gnieboer/tensorflow,annarev/tensorflow,haeusser/tensorflow,JingJunYin/tensorflow,snnn/tensorflow,lukeiwanski/tensorflow-opencl,ibab/tensorflow,alshedivat/tensorflow,AnishShah/tensorflow,anand-c-goog/tensorflow,DCSaunders/tensorflow,seanli9jan/tensorflow,scenarios/tensorflow,xodus7/tensorflow,rabipanda/tensorflow,LUTAN/tensorflow,dancingdan/tensorflow,jart/tensorflow,alisidd/tensorflow,zycdragonball/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,handroissuazo/tensorflow,nolanliou/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,vrv/tensorflow,andrewcmyers/tensorflow,girving/tensorflow,nanditav/15712-TensorFlow,AndreasMadsen/tensorflow,LUTAN/tensorflow,ivano666/tensorflow,asadziach/tensorflow,tntnatbry/tensorflow,DavidNorman/tensorflow,dancingdan/tensorflow,paolodedios/tensorflow,sandeepdsouza93/TensorFlow-15712,dongjoon-hyun/tensorflow,thesuperzapper/tensorflow,dyoung418/tensorflow,alivecor/tensorflow,asimshankar/tensorflow,seanli9jan/tensorflow,ppwwyyxx/tensorflow,hsaputra/tensorflow,strint/tensorflow,gautam1858/tensorflow,benoitsteiner/tensorflow-opencl,wangyum/tensorflow,benoitsteiner/tensorflow-opencl,johndpope/tensorflow,jalexvig/tensorflow,freedomtan/tensorflow,TakayukiSakai/tensorflow,Xeralux/tensorflow,dendisuhubdy/tensorflow,kevin-coder/tensorflow-fork,bowang/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,alsrgv/tensorflow,benoitsteiner/tensorflow,dyoung418/tensorflow,chris-chris/tensorflow,mavenlin/tensorflow,wangyum/tensorflow,mengxn/tensorflow,horance-liu/tensorflow,aam-at/tensorflow,ZhangXinNan/tensorflow,kevin-coder/tensorflow-fork,lakshayg/tensorflow,calebfoss/tensorflow,mavenlin/tensorflow,a-doumoulakis/tensorflow,tongwang01/tensorflow,gojira/tensorflow,tongwang01/tensorflow,with-git/tensorflow,llhe/tensorflow,jhaux/tensorflow,nburn42/tensorflow,Carmezim/tensorflow,dhalleine/tensorflow,jhseu/tensorflow,aam-at/tensorflow,johndpope/tensorflow,annarev/tensorflow,adit-chandra/tensorflow,cg31/tensorflow,AnishShah/tensorflow,brchiu/tensorflow,manipopopo/tensorflow,dyoung418/tensorflow,abhitopia/tensorflow,kevin-coder/tensorflow-fork,Bismarrck/tensorflow,karllessard/tensorflow,MoamerEncsConcordiaCa/tensorflow,ZhangXinNan/tensorflow,meteorcloudy/tensorflow,frreiss/tensorflow-fred,ychfan/tensorflow,chenjun0210/tensorflow,alheinecke/tensorflow-xsmm,Mazecreator/tensorflow,nburn42/tensorflow,renyi533/tensorflow,chemelnucfin/tensorflow,AnishShah/tensorflow,eadgarchen/tensorflow,alshedivat/tensorflow,lukeiwanski/tensorflow-opencl,martinwicke/tensorflow,hsaputra/tensorflow,LUTAN/tensorflow,theflofly/tensorflow,alistairlow/tensorflow,jendap/tensorflow,mavenlin/tensorflow,alivecor/tensorflow,panmari/tensorflow,jhseu/tensorflow,Mistobaan/tensorflow,arborh/tensorflow,asimshankar/tensorflow,alsr
gv/tensorflow,unsiloai/syntaxnet-ops-hack,manazhao/tf_recsys,karllessard/tensorflow,LUTAN/tensorflow,laosiaudi/tensorflow,jendap/tensorflow,aldian/tensorflow,xodus7/tensorflow,sandeepgupta2k4/tensorflow,jhseu/tensorflow,MycChiu/tensorflow,snnn/tensorflow,seanli9jan/tensorflow,strint/tensorflow,cancan101/tensorflow,aldian/tensorflow,theflofly/tensorflow,ppwwyyxx/tensorflow,brchiu/tensorflow,apark263/tensorflow,chemelnucfin/tensorflow,gibiansky/tensorflow,dyoung418/tensorflow,TakayukiSakai/tensorflow,freedomtan/tensorflow,cxxgtxy/tensorflow,ibab/tensorflow,manazhao/tf_recsys,Xeralux/tensorflow,cxxgtxy/tensorflow,tillahoffmann/tensorflow,ivano666/tensorflow,gunan/tensorflow,seanli9jan/tensorflow,code-sauce/tensorflow,ivano666/tensorflow,adamtiger/tensorflow,haeusser/tensorflow,dancingdan/tensorflow,Bulochkin/tensorflow_pack,eadgarchen/tensorflow,ibmsoe/tensorflow,calebfoss/tensorflow,caisq/tensorflow,ppwwyyxx/tensorflow,arborh/tensorflow,nikste/tensorflow,manazhao/tf_recsys,petewarden/tensorflow_makefile,jostep/tensorflow,hsaputra/tensorflow,suiyuan2009/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jwlawson/tensorflow,freedomtan/tensorflow,nanditav/15712-TensorFlow,ninotoshi/tensorflow,pierreg/tensorflow,whn09/tensorflow,freedomtan/tensorflow,dansbecker/skflow,alistairlow/tensorflow,brchiu/tensorflow,elingg/tensorflow,EvenStrangest/tensorflow,lukeiwanski/tensorflow-opencl,DavidNorman/tensorflow,Moriadry/tensorflow,paolodedios/tensorflow,a-doumoulakis/tensorflow,alshedivat/tensorflow,Kongsea/tensorflow,asadziach/tensorflow,chemelnucfin/tensorflow,annarev/tensorflow,dancingdan/tensorflow,MycChiu/tensorflow,davidzchen/tensorflow,kobejean/tensorflow,eadgarchen/tensorflow,gunan/tensorflow,Carmezim/tensorflow,gnieboer/tensorflow,mengxn/tensorflow,ppwwyyxx/tensorflow,xzturn/tensorflow,jwlawson/tensorflow,Mazecreator/tensorflow,alsrgv/tensorflow,lukeiwanski/tensorflow,drpngx/tensorflow,thesuperzapper/tensorflow,LUTAN/tensorflow,alisidd/tensorflow,rabipanda/tensorflow,adit-chandra/tensorflow,rabipanda/tensorflow,alheinecke/tensorflow-xsmm,alisidd/tensorflow,jbedorf/tensorflow,SnakeJenny/TensorFlow,ghchinoy/tensorflow,alsrgv/tensorflow,davidzchen/tensorflow,kevin-coder/tensorflow-fork,peterbraden/tensorflow,Xeralux/tensorflow,ravindrapanda/tensorflow,peterbraden/tensorflow,memo/tensorflow,wangyum/tensorflow,taknevski/tensorflow-xsmm,allenlavoie/tensorflow,krikru/tensorflow-opencl,MoamerEncsConcordiaCa/tensorflow,llhe/tensorflow,kevin-coder/tensorflow-fork,SnakeJenny/TensorFlow,taknevski/tensorflow-xsmm,kchodorow/tensorflow,hsaputra/tensorflow,jbedorf/tensorflow,aam-at/tensorflow,yaroslavvb/tensorflow,SnakeJenny/TensorFlow,ishay2b/tensorflow,sjperkins/tensorflow,jeffzheng1/tensorflow,lakshayg/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tomasreimers/tensorflow-emscripten,tensorflow/tensorflow-experimental_link_static_libraries_once,ppwwyyxx/tensorflow,cancan101/tensorflow,chemelnucfin/tensorflow,snnn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,benoitsteiner/tensorflow-opencl,ibab/tensorflow,codrut3/tensorflow,wchan/tensorflow,ychfan/tensorflow,jalexvig/tensorflow,kobejean/tensorflow,asadziach/tensorflow,tomasreimers/tensorflow-emscripten,pavelchristof/gomoku-ai,Bulochkin/tensorflow_pack,chenjun0210/tensorflow,dyoung418/tensorflow,code-sauce/tensorflow,thjashin/tensorflow,taknevski/tensorflow-xsmm,ageron/tensorflow,anand-c-goog/tensorflow,ninotoshi/tensorflow,yanchen036/tensorflow,mortada/tensorflow,tomasreimers/tensorflow-emscripten,kamcpp/
tensorflow,thesuperzapper/tensorflow,tensorflow/tensorflow-pywrap_saved_model,neilhan/tensorflow,admcrae/tensorflow,drpngx/tensorflow,frreiss/tensorflow-fred,yaroslavvb/tensorflow,xzturn/tensorflow,benoitsteiner/tensorflow,adamtiger/tensorflow,gojira/tensorflow,caisq/tensorflow,lukeiwanski/tensorflow-opencl,DCSaunders/tensorflow,ppries/tensorflow,davidzchen/tensorflow,benoitsteiner/tensorflow,Xeralux/tensorflow,gojira/tensorflow,raymondxyang/tensorflow,jhseu/tensorflow,strint/tensorflow,alivecor/tensorflow,drpngx/tensorflow,JingJunYin/tensorflow,snnn/tensorflow,guschmue/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_saved_model,arborh/tensorflow,gnieboer/tensorflow,haeusser/tensorflow,raymondxyang/tensorflow,anilmuthineni/tensorflow,benoitsteiner/tensorflow-xsmm,manipopopo/tensorflow,ppries/tensorflow,asimshankar/tensorflow,chenjun0210/tensorflow,pcm17/tensorflow,vrv/tensorflow,xodus7/tensorflow,nolanliou/tensorflow,code-sauce/tensorflow,eerwitt/tensorflow,Bismarrck/tensorflow,DavidNorman/tensorflow,ravindrapanda/tensorflow,Mistobaan/tensorflow,jbedorf/tensorflow,girving/tensorflow,sarvex/tensorflow,jalexvig/tensorflow,hfp/tensorflow-xsmm,petewarden/tensorflow,dancingdan/tensorflow,ivano666/tensorflow,codrut3/tensorflow,DCSaunders/tensorflow,mixturemodel-flow/tensorflow,jendap/tensorflow,DCSaunders/tensorflow,MycChiu/tensorflow,AnishShah/tensorflow,MycChiu/tensorflow,gibiansky/tensorflow,Intel-tensorflow/tensorflow,JVillella/tensorflow,markslwong/tensorflow,jwlawson/tensorflow,mrry/tensorflow,nburn42/tensorflow,admcrae/tensorflow,jbedorf/tensorflow,ravindrapanda/tensorflow,seanli9jan/tensorflow,eaplatanios/tensorflow,hsaputra/tensorflow,EvenStrangest/tensorflow,gnieboer/tensorflow,adit-chandra/tensorflow,wchan/tensorflow,pcm17/tensorflow,thesuperzapper/tensorflow,martinwicke/tensorflow,seaotterman/tensorflow,gnieboer/tensorflow,lukeiwanski/tensorflow,sandeepdsouza93/TensorFlow-15712,asimshankar/tensorflow,Intel-tensorflow/tensorflow,guschmue/tensorflow,ibab/tensorflow,jbedorf/tensorflow,chemelnucfin/tensorflow,gautam1858/tensorflow,raymondxyang/tensorflow,HKUST-SING/tensorflow,lukeiwanski/tensorflow-opencl,ppries/tensorflow,sandeepdsouza93/TensorFlow-15712,pavelchristof/gomoku-ai,ishay2b/tensorflow,whn09/tensorflow,ibmsoe/tensorflow,meteorcloudy/tensorflow,Carmezim/tensorflow,whn09/tensorflow,a-doumoulakis/tensorflow,theflofly/tensorflow,jhseu/tensorflow,ArtsiomCh/tensorflow,ppries/tensorflow,jart/tensorflow,llhe/tensorflow,dhalleine/tensorflow,adit-chandra/tensorflow,DavidNorman/tensorflow,HKUST-SING/tensorflow,rdipietro/tensorflow,ppries/tensorflow,nolanliou/tensorflow,benoitsteiner/tensorflow,ghchinoy/tensorflow,sarvex/tensorflow,seanli9jan/tensorflow,Carmezim/tensorflow,chris-chris/tensorflow,davidzchen/tensorflow,eaplatanios/tensorflow,pavelchristof/gomoku-ai,thjashin/tensorflow,mortada/tensorflow,neilhan/tensorflow,Xeralux/tensorflow,mdrumond/tensorflow,paolodedios/tensorflow,cancan101/tensorflow,jhaux/tensorflow,hehongliang/tensorflow,dendisuhubdy/tensorflow,Bismarrck/tensorflow,renyi533/tensorflow,renyi533/tensorflow,tillahoffmann/tensorflow,apark263/tensorflow,chenjun0210/tensorflow,anilmuthineni/tensorflow,Intel-Corporation/tensorflow,theflofly/tensorflow,manipopopo/tensorflow,manjunaths/tensorflow,alshedivat/tensorflow,nightjean/Deep-Learning,lakshayg/tensorflow,kobejean/tensorflow,cg31/tensorflow,handroissuazo/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Carmezim/tensorflow,aldian/tensorflow,panmari/tensorflow,frreis
s/tensorflow-fred,odejesush/tensorflow,tillahoffmann/tensorflow,ppwwyyxx/tensorflow,Moriadry/tensorflow,xzturn/tensorflow,ravindrapanda/tensorflow,jendap/tensorflow,jart/tensorflow,paolodedios/tensorflow,manjunaths/tensorflow,yufengg/tensorflow,johndpope/tensorflow,jhaux/tensorflow,caisq/tensorflow,jalexvig/tensorflow,annarev/tensorflow,naturali/tensorflow,handroissuazo/tensorflow,mrry/tensorflow,jwlawson/tensorflow,strint/tensorflow,maciekcc/tensorflow,nolanliou/tensorflow,meteorcloudy/tensorflow,ageron/tensorflow,lakshayg/tensorflow,odejesush/tensorflow,adit-chandra/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,karllessard/tensorflow,DCSaunders/tensorflow,snnn/tensorflow,yufengg/tensorflow,Bismarrck/tensorflow,scenarios/tensorflow,chenjun0210/tensorflow,LUTAN/tensorflow,thjashin/tensorflow,tongwang01/tensorflow,awni/tensorflow,ville-k/tensorflow,Kongsea/tensorflow,code-sauce/tensorflow,ran5515/DeepDecision,nanditav/15712-TensorFlow,apark263/tensorflow,AndreasMadsen/tensorflow,seaotterman/tensorflow,tomasreimers/tensorflow-emscripten,RapidApplicationDevelopment/tensorflow,nburn42/tensorflow,ghchinoy/tensorflow,bowang/tensorflow,alshedivat/tensorflow,ArtsiomCh/tensorflow,alivecor/tensorflow,ran5515/DeepDecision,dendisuhubdy/tensorflow,TakayukiSakai/tensorflow,Moriadry/tensorflow,karllessard/tensorflow,XueqingLin/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,manipopopo/tensorflow,AndreasMadsen/tensorflow,unsiloai/syntaxnet-ops-hack,ninotoshi/tensorflow,MostafaGazar/tensorflow,jwlawson/tensorflow,manjunaths/tensorflow,ZhangXinNan/tensorflow,nanditav/15712-TensorFlow,tomasreimers/tensorflow-emscripten,tiagofrepereira2012/tensorflow,panmari/tensorflow,abhitopia/tensorflow,kobejean/tensorflow,DavidNorman/tensorflow,dongjoon-hyun/tensorflow,strint/tensorflow,pierreg/tensorflow,tensorflow/tensorflow,horance-liu/tensorflow,drpngx/tensorflow,dyoung418/tensorflow,tornadozou/tensorflow,nburn42/tensorflow,XueqingLin/tensorflow,drpngx/tensorflow,Mazecreator/tensorflow,Intel-Corporation/tensorflow,abhitopia/tensorflow,cxxgtxy/tensorflow,aselle/tensorflow,asimshankar/tensorflow,Bulochkin/tensorflow_pack,seaotterman/tensorflow,jhseu/tensorflow,seanli9jan/tensorflow,zasdfgbnm/tensorflow,MoamerEncsConcordiaCa/tensorflow,tntnatbry/tensorflow,girving/tensorflow,MostafaGazar/tensorflow,cancan101/tensorflow,calebfoss/tensorflow,tensorflow/tensorflow,ghchinoy/tensorflow,with-git/tensorflow,haeusser/tensorflow,maciekcc/tensorflow,krikru/tensorflow-opencl,ZhangXinNan/tensorflow,codrut3/tensorflow,ychfan/tensorflow,hehongliang/tensorflow,pierreg/tensorflow,bowang/tensorflow,girving/tensorflow,girving/tensorflow,Intel-tensorflow/tensorflow,theflofly/tensorflow,manazhao/tf_recsys,admcrae/tensorflow,chenjun0210/tensorflow,jendap/tensorflow,benoitsteiner/tensorflow-opencl,ville-k/tensorflow,chenjun0210/tensorflow,thjashin/tensorflow,lukeiwanski/tensorflow,arborh/tensorflow,admcrae/tensorflow,Mistobaan/tensorflow,cancan101/tensorflow,ageron/tensorflow,dongjoon-hyun/tensorflow,hehongliang/tensorflow,vrv/tensorflow,odejesush/tensorflow,MostafaGazar/tensorflow,alistairlow/tensorflow,eerwitt/tensorflow,eaplatanios/tensorflow,xodus7/tensorflow,kamcpp/tensorflow,alistairlow/tensorflow,dancingdan/tensorflow,mengxn/tensorflow,rdipietro/tensorflow,ibmsoe/tensorflow,codrut3/tensorflow,drpngx/tensorflow,xzturn/tensorflow,jeffzheng1/tensorflow,HaebinShin/tensorflow,bowang/tensorflow,yongtang/tensorflow,eerwitt/tensorflow,ville-k/tensorflow,lu
keiwanski/tensorflow,ibab/tensorflow,Carmezim/tensorflow,kchodorow/tensorflow,laszlocsomor/tensorflow,juharris/tensorflow,Intel-Corporation/tensorflow,guschmue/tensorflow,nightjean/Deep-Learning,gojira/tensorflow,zasdfgbnm/tensorflow,karllessard/tensorflow,Xeralux/tensorflow,frreiss/tensorflow-fred,tntnatbry/tensorflow,dendisuhubdy/tensorflow,ghchinoy/tensorflow,alistairlow/tensorflow,XueqingLin/tensorflow,Intel-tensorflow/tensorflow,taknevski/tensorflow-xsmm,suiyuan2009/tensorflow,nburn42/tensorflow,ravindrapanda/tensorflow,nightjean/Deep-Learning,brchiu/tensorflow,hsaputra/tensorflow,handroissuazo/tensorflow,wchan/tensorflow,horance-liu/tensorflow,tntnatbry/tensorflow,benoitsteiner/tensorflow-opencl,jostep/tensorflow,yanchen036/tensorflow,scenarios/tensorflow,kevin-coder/tensorflow-fork,paolodedios/tensorflow,pcm17/tensorflow,annarev/tensorflow,wangyum/tensorflow,gojira/tensorflow,calebfoss/tensorflow,asimshankar/tensorflow,nikste/tensorflow,martinwicke/tensorflow,jwlawson/tensorflow,jbedorf/tensorflow,alivecor/tensorflow,llhe/tensorflow,av8ramit/tensorflow,benoitsteiner/tensorflow-opencl,XueqingLin/tensorflow,ravindrapanda/tensorflow,vrv/tensorflow,ivano666/tensorflow,mdrumond/tensorflow,pierreg/tensorflow,vrv/tensorflow,Bulochkin/tensorflow_pack,seaotterman/tensorflow,alistairlow/tensorflow,hfp/tensorflow-xsmm,martinwicke/tensorflow,raymondxyang/tensorflow,mavenlin/tensorflow,adit-chandra/tensorflow,Bulochkin/tensorflow_pack,chemelnucfin/tensorflow,calebfoss/tensorflow,alheinecke/tensorflow-xsmm,yaroslavvb/tensorflow,ghchinoy/tensorflow,neilhan/tensorflow,nikste/tensorflow,anand-c-goog/tensorflow,memo/tensorflow,krikru/tensorflow-opencl,alheinecke/tensorflow-xsmm,adamtiger/tensorflow,jalexvig/tensorflow,frreiss/tensorflow-fred,kamcpp/tensorflow,aldian/tensorflow,ville-k/tensorflow,pavelchristof/gomoku-ai,Intel-tensorflow/tensorflow,alivecor/tensorflow,allenlavoie/tensorflow,av8ramit/tensorflow,petewarden/tensorflow_makefile,memo/tensorflow,sandeepgupta2k4/tensorflow,alisidd/tensorflow,manazhao/tf_recsys,jart/tensorflow,paolodedios/tensorflow,XueqingLin/tensorflow,haeusser/tensorflow,jwlawson/tensorflow,krikru/tensorflow-opencl,jwlawson/tensorflow,av8ramit/tensorflow,chenjun0210/tensorflow,caisq/tensorflow,lukeiwanski/tensorflow-opencl,cg31/tensorflow,ninotoshi/tensorflow,ravindrapanda/tensorflow,nikste/tensorflow,mengxn/tensorflow,davidzchen/tensorflow,jeffzheng1/tensorflow,Bulochkin/tensorflow_pack,ppwwyyxx/tensorflow,eerwitt/tensorflow,zycdragonball/tensorflow,snnn/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,dancingdan/tensorflow,raymondxyang/tensorflow,eerwitt/tensorflow,mixturemodel-flow/tensorflow,ageron/tensorflow,kevin-coder/tensorflow-fork,yufengg/tensorflow,zasdfgbnm/tensorflow,manipopopo/tensorflow,ppwwyyxx/tensorflow,krikru/tensorflow-opencl,allenlavoie/tensorflow,HaebinShin/tensorflow,anilmuthineni/tensorflow,ageron/tensorflow,jart/tensorflow,dhalleine/tensorflow,petewarden/tensorflow_makefile,laszlocsomor/tensorflow,tensorflow/skflow,JVillella/tensorflow,davidzchen/tensorflow,odejesush/tensorflow,lukeiwanski/tensorflow-opencl,kamcpp/tensorflow,jalexvig/tensorflow,anand-c-goog/tensorflow,eadgarchen/tensorflow,eaplatanios/tensorflow,manipopopo/tensorflow,nburn42/tensorflow,jwlawson/tensorflow,manjunaths/tensorflow,ychfan/tensorflow,AnishShah/tensorflow,juharris/tensorflow,jeffzheng1/tensorflow,bowang/tensorflow,Mistobaan/tensorflow,Xeralux/tensorflow,neilhan/tensorflow,laszlocsomor/tensorflow,nanditav/15712-TensorFlow,hfp/tensorflow-xsmm,tillahoffmann/tensorflow,Haeb
inShin/tensorflow,naturali/tensorflow,markslwong/tensorflow,renyi533/tensorflow,nightjean/Deep-Learning,sandeepdsouza93/TensorFlow-15712,odejesush/tensorflow,HaebinShin/tensorflow,ZhangXinNan/tensorflow,haeusser/tensorflow,suiyuan2009/tensorflow,allenlavoie/tensorflow,codrut3/tensorflow,nikste/tensorflow,gojira/tensorflow,JVillella/tensorflow,paolodedios/tensorflow,sandeepdsouza93/TensorFlow-15712,nburn42/tensorflow,SnakeJenny/TensorFlow,gautam1858/tensorflow,sarvex/tensorflow,TakayukiSakai/tensorflow,yongtang/tensorflow,apark263/tensorflow,petewarden/tensorflow_makefile,av8ramit/tensorflow,admcrae/tensorflow,tomasreimers/tensorflow-emscripten,martinwicke/tensorflow,gautam1858/tensorflow,JingJunYin/tensorflow,aam-at/tensorflow,strint/tensorflow,dendisuhubdy/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,benoitsteiner/tensorflow-xsmm,annarev/tensorflow,tillahoffmann/tensorflow,Moriadry/tensorflow,dongjoon-hyun/tensorflow,kchodorow/tensorflow,EvenStrangest/tensorflow,mrry/tensorflow,yufengg/tensorflow,pierreg/tensorflow,anilmuthineni/tensorflow,maciekcc/tensorflow,renyi533/tensorflow,vrv/tensorflow,maciekcc/tensorflow,Carmezim/tensorflow,anilmuthineni/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,elingg/tensorflow,sjperkins/tensorflow,lukeiwanski/tensorflow-opencl,laosiaudi/tensorflow,cg31/tensorflow,ArtsiomCh/tensorflow,asadziach/tensorflow,alistairlow/tensorflow,ghchinoy/tensorflow,lukeiwanski/tensorflow,with-git/tensorflow,EvenStrangest/tensorflow,kobejean/tensorflow,sandeepdsouza93/TensorFlow-15712,pavelchristof/gomoku-ai,vrv/tensorflow,ravindrapanda/tensorflow,aam-at/tensorflow,jalexvig/tensorflow,tongwang01/tensorflow,Bismarrck/tensorflow,jhseu/tensorflow,Intel-tensorflow/tensorflow,seaotterman/tensorflow,meteorcloudy/tensorflow,ishay2b/tensorflow,jendap/tensorflow,tongwang01/tensorflow,apark263/tensorflow,EvenStrangest/tensorflow,horance-liu/tensorflow,xzturn/tensorflow,guschmue/tensorflow,bowang/tensorflow,adit-chandra/tensorflow,jhaux/tensorflow,ageron/tensorflow,andrewcmyers/tensorflow,yanchen036/tensorflow,gunan/tensorflow,handroissuazo/tensorflow,JingJunYin/tensorflow,ibmsoe/tensorflow,Intel-Corporation/tensorflow,ravindrapanda/tensorflow,mavenlin/tensorflow,rdipietro/tensorflow,jhaux/tensorflow,mdrumond/tensorflow,tongwang01/tensorflow,petewarden/tensorflow,zycdragonball/tensorflow,ivano666/tensorflow,AnishShah/tensorflow,alsrgv/tensorflow,ppries/tensorflow,Xeralux/tensorflow,mrry/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gunan/tensorflow,benoitsteiner/tensorflow-opencl,kobejean/tensorflow,Moriadry/tensorflow,gojira/tensorflow,JVillella/tensorflow,alheinecke/tensorflow-xsmm,renyi533/tensorflow,xzturn/tensorflow,tiagofrepereira2012/tensorflow,tongwang01/tensorflow,karllessard/tensorflow,xodus7/tensorflow,nanditav/15712-TensorFlow,freedomtan/tensorflow,ZhangXinNan/tensorflow,rabipanda/tensorflow,apark263/tensorflow,gnieboer/tensorflow,RapidApplicationDevelopment/tensorflow,Mistobaan/tensorflow,meteorcloudy/tensorflow,Moriadry/tensorflow,zasdfgbnm/tensorflow,ageron/tensorflow,kobejean/tensorflow,chris-chris/tensorflow,tillahoffmann/tensorflow,yaroslavvb/tensorflow,ran5515/DeepDecision,alheinecke/tensorflow-xsmm,mrry/tensorflow,laszlocsomor/tensorflow,apark263/tensorflow,nikste/tensorflow,sarvex/tensorflow,jbedorf/tensorflow,XueqingLin/tensorflow,markslwong/tensorflow,alheinecke/tensorflow-xsmm,anand-c-goog/tensorflow,allenlavoie/tensorflow,johndpope/tensorflow,karllessard/tensorflow,cg31/tensorflow,bowang/tensorflow,DC
Saunders/tensorflow,ran5515/DeepDecision,horance-liu/tensorflow,MoamerEncsConcordiaCa/tensorflow,lukeiwanski/tensorflow,seanli9jan/tensorflow,ibab/tensorflow,anand-c-goog/tensorflow,neilhan/tensorflow,hfp/tensorflow-xsmm,DavidNorman/tensorflow,mengxn/tensorflow,cancan101/tensorflow,kamcpp/tensorflow,pavelchristof/gomoku-ai,renyi533/tensorflow,lukeiwanski/tensorflow-opencl,andrewcmyers/tensorflow,with-git/tensorflow,JVillella/tensorflow,mengxn/tensorflow,admcrae/tensorflow,paolodedios/tensorflow,xodus7/tensorflow,peterbraden/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,jeffzheng1/tensorflow,snnn/tensorflow,cg31/tensorflow,jhseu/tensorflow,nolanliou/tensorflow,manipopopo/tensorflow,ibmsoe/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow-pywrap_saved_model,nightjean/Deep-Learning,brchiu/tensorflow,gunan/tensorflow,lakshayg/tensorflow,eerwitt/tensorflow,xodus7/tensorflow,adit-chandra/tensorflow,zasdfgbnm/tensorflow,allenlavoie/tensorflow,ychfan/tensorflow,dancingdan/tensorflow,frreiss/tensorflow-fred,jbedorf/tensorflow,tornadozou/tensorflow,gojira/tensorflow,jbedorf/tensorflow,naturali/tensorflow,caisq/tensorflow,ninotoshi/tensorflow,lakshayg/tensorflow,hfp/tensorflow-xsmm,nolanliou/tensorflow,laosiaudi/tensorflow,caisq/tensorflow,suiyuan2009/tensorflow,jendap/tensorflow,LUTAN/tensorflow,jbedorf/tensorflow,memo/tensorflow,xzturn/tensorflow,Intel-tensorflow/tensorflow,gibiansky/tensorflow,kchodorow/tensorflow,laszlocsomor/tensorflow,with-git/tensorflow,tornadozou/tensorflow,rabipanda/tensorflow,memo/tensorflow,alsrgv/tensorflow,tntnatbry/tensorflow,sjperkins/tensorflow,whn09/tensorflow,neilhan/tensorflow,gautam1858/tensorflow,Bismarrck/tensorflow,mengxn/tensorflow,pierreg/tensorflow,kchodorow/tensorflow,with-git/tensorflow,llhe/tensorflow,wchan/tensorflow,ZhangXinNan/tensorflow,jostep/tensorflow,calebfoss/tensorflow,davidzchen/tensorflow,yanchen036/tensorflow,guschmue/tensorflow,anand-c-goog/tensorflow,meteorcloudy/tensorflow,awni/tensorflow,ville-k/tensorflow,jeffzheng1/tensorflow,jostep/tensorflow,eadgarchen/tensorflow,mengxn/tensorflow,Bismarrck/tensorflow,yongtang/tensorflow,nightjean/Deep-Learning,zasdfgbnm/tensorflow,ville-k/tensorflow,yanchen036/tensorflow,tomasreimers/tensorflow-emscripten,unsiloai/syntaxnet-ops-hack,manjunaths/tensorflow,admcrae/tensorflow,memo/tensorflow,dhalleine/tensorflow,brchiu/tensorflow,tntnatbry/tensorflow,krikru/tensorflow-opencl,benoitsteiner/tensorflow-xsmm,abhitopia/tensorflow,asimshankar/tensorflow,nanditav/15712-TensorFlow,jostep/tensorflow,chemelnucfin/tensorflow,kevin-coder/tensorflow-fork,XueqingLin/tensorflow,naturali/tensorflow,theflofly/tensorflow,Mistobaan/tensorflow,ran5515/DeepDecision,alshedivat/tensorflow,adamtiger/tensorflow,ageron/tensorflow,tntnatbry/tensorflow,eadgarchen/tensorflow,Mazecreator/tensorflow,mavenlin/tensorflow,seanli9jan/tensorflow,abhitopia/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Xeralux/tensorflow,whn09/tensorflow,manipopopo/tensorflow,HKUST-SING/tensorflow,cxxgtxy/tensorflow,MostafaGazar/tensorflow,ychfan/tensorflow,martinwicke/tensorflow,elingg/tensorflow,johndpope/tensorflow,cancan101/tensorflow,abhitopia/tensorflow,girving/tensorflow,AnishShah/tensorflow,mortada/tensorflow,Bismarrck/tensorflow,alisidd/tensorflow,manazhao/tf_recsys,seaotterman/tensorflow,renyi533/tensorflow,asadziach/tensorflow,a-doumoulakis/tensorflow,mortada/tensorflow,gautam1858/tensorflow,tiagofrepereira2012/tensorflow,alsrgv/tensorflow,seanli9jan/tensorflow,AnishShah/tensorflow,snnn/tensorflow,codrut
3/tensorflow,a-doumoulakis/tensorflow,rdipietro/tensorflow,tomasreimers/tensorflow-emscripten,benoitsteiner/tensorflow-xsmm,jhaux/tensorflow,elingg/tensorflow,laosiaudi/tensorflow,aselle/tensorflow,gunan/tensorflow,krikru/tensorflow-opencl,gnieboer/tensorflow,xodus7/tensorflow,snnn/tensorflow,petewarden/tensorflow,sjperkins/tensorflow,petewarden/tensorflow,naturali/tensorflow,hfp/tensorflow-xsmm,abhitopia/tensorflow,DavidNorman/tensorflow,llhe/tensorflow,annarev/tensorflow,yongtang/tensorflow,laosiaudi/tensorflow,llhe/tensorflow,anand-c-goog/tensorflow,wchan/tensorflow,hsaputra/tensorflow,yongtang/tensorflow,hfp/tensorflow-xsmm,mdrumond/tensorflow,chenjun0210/tensorflow,TakayukiSakai/tensorflow,whn09/tensorflow,seaotterman/tensorflow,gibiansky/tensorflow,hsaputra/tensorflow,Mistobaan/tensorflow,allenlavoie/tensorflow,hfp/tensorflow-xsmm,adit-chandra/tensorflow,maciekcc/tensorflow,eaplatanios/tensorflow,neilhan/tensorflow,DavidNorman/tensorflow,peterbraden/tensorflow,mrry/tensorflow,gunan/tensorflow,xzturn/tensorflow,JVillella/tensorflow,AnishShah/tensorflow,jart/tensorflow,scenarios/tensorflow,taknevski/tensorflow-xsmm,RapidApplicationDevelopment/tensorflow,mixturemodel-flow/tensorflow,taknevski/tensorflow-xsmm,lukeiwanski/tensorflow,a-doumoulakis/tensorflow,jostep/tensorflow,panmari/tensorflow,nikste/tensorflow,tensorflow/tensorflow-pywrap_saved_model,ychfan/tensorflow,ZhangXinNan/tensorflow,cancan101/tensorflow,thesuperzapper/tensorflow,awni/tensorflow,awni/tensorflow,ran5515/DeepDecision,aldian/tensorflow,aam-at/tensorflow,alsrgv/tensorflow,maciekcc/tensorflow,lakshayg/tensorflow,Moriadry/tensorflow,kchodorow/tensorflow,ghchinoy/tensorflow,benoitsteiner/tensorflow-xsmm,frreiss/tensorflow-fred,dongjoon-hyun/tensorflow,karllessard/tensorflow,handroissuazo/tensorflow,mortada/tensorflow,apark263/tensorflow,ppries/tensorflow,hfp/tensorflow-xsmm,arborh/tensorflow,Mistobaan/tensorflow,nightjean/Deep-Learning,ninotoshi/tensorflow,frreiss/tensorflow-fred,ibmsoe/tensorflow,kevin-coder/tensorflow-fork,codrut3/tensorflow,raymondxyang/tensorflow,Carmezim/tensorflow,eaplatanios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gunan/tensorflow,codrut3/tensorflow,raymondxyang/tensorflow,wangyum/tensorflow,rabipanda/tensorflow,av8ramit/tensorflow,chemelnucfin/tensorflow,ZhangXinNan/tensorflow,guschmue/tensorflow,tiagofrepereira2012/tensorflow,annarev/tensorflow,whn09/tensorflow,MoamerEncsConcordiaCa/tensorflow,ppries/tensorflow,jwlawson/tensorflow,benoitsteiner/tensorflow,manjunaths/tensorflow,Intel-tensorflow/tensorflow,eerwitt/tensorflow,eerwitt/tensorflow,RapidApplicationDevelopment/tensorflow,ishay2b/tensorflow,chris-chris/tensorflow,andrewcmyers/tensorflow,ychfan/tensorflow,sandeepdsouza93/TensorFlow-15712,laosiaudi/tensorflow,zasdfgbnm/tensorflow,ivano666/tensorflow,jalexvig/tensorflow,sandeepdsouza93/TensorFlow-15712,girving/tensorflow,dancingdan/tensorflow,karllessard/tensorflow,arborh/tensorflow,benoitsteiner/tensorflow-opencl,HKUST-SING/tensorflow,sarvex/tensorflow,jbedorf/tensorflow,gautam1858/tensorflow,handroissuazo/tensorflow,jostep/tensorflow,code-sauce/tensorflow,johndpope/tensorflow,alshedivat/tensorflow,ville-k/tensorflow,jendap/tensorflow,sjperkins/tensorflow,dendisuhubdy/tensorflow,drpngx/tensorflow,unsiloai/syntaxnet-ops-hack,laszlocsomor/tensorflow,chris-chris/tensorflow,zasdfgbnm/tensorflow,Intel-Corporation/tensorflow,juharris/tensorflow,yufengg/tensorflow,cxxgtxy/tensorflow,pcm17/tensorflow,yufengg/tensorflow,Intel-tensorflow/tensorflow,Mistobaan/tensorflo
w,yufengg/tensorflow,jeffzheng1/tensorflow,nolanliou/tensorflow,tornadozou/tensorflow,tiagofrepereira2012/tensorflow,XueqingLin/tensorflow,zasdfgbnm/tensorflow,ville-k/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,jalexvig/tensorflow,aselle/tensorflow,chris-chris/tensorflow,hfp/tensorflow-xsmm,MoamerEncsConcordiaCa/tensorflow,tensorflow/tensorflow-pywrap_saved_model,mengxn/tensorflow,apark263/tensorflow,awni/tensorflow,ninotoshi/tensorflow,AnishShah/tensorflow,suiyuan2009/tensorflow,benoitsteiner/tensorflow-xsmm,gojira/tensorflow,yaroslavvb/tensorflow,adit-chandra/tensorflow,xzturn/tensorflow,whn09/tensorflow,aldian/tensorflow,brchiu/tensorflow,mortada/tensorflow,manjunaths/tensorflow,guschmue/tensorflow,gibiansky/tensorflow,theflofly/tensorflow,Mazecreator/tensorflow,Mazecreator/tensorflow,seaotterman/tensorflow,eadgarchen/tensorflow,sjperkins/tensorflow,manazhao/tf_recsys,ppwwyyxx/tensorflow,meteorcloudy/tensorflow,ArtsiomCh/tensorflow,thesuperzapper/tensorflow,martinwicke/tensorflow,mdrumond/tensorflow,panmari/tensorflow,nburn42/tensorflow,chris-chris/tensorflow,ZhangXinNan/tensorflow,JingJunYin/tensorflow,elingg/tensorflow,a-doumoulakis/tensorflow,manipopopo/tensorflow,arborh/tensorflow,rdipietro/tensorflow,dongjoon-hyun/tensorflow,naturali/tensorflow,SnakeJenny/TensorFlow,ghchinoy/tensorflow,asimshankar/tensorflow,benoitsteiner/tensorflow,kamcpp/tensorflow,thjashin/tensorflow,kevin-coder/tensorflow-fork,AndreasMadsen/tensorflow,petewarden/tensorflow_makefile,unsiloai/syntaxnet-ops-hack,Bismarrck/tensorflow,aselle/tensorflow,jhaux/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,anilmuthineni/tensorflow,pcm17/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,horance-liu/tensorflow,MostafaGazar/tensorflow,JingJunYin/tensorflow,nikste/tensorflow,aam-at/tensorflow,MoamerEncsConcordiaCa/tensorflow,hehongliang/tensorflow,davidzchen/tensorflow,pcm17/tensorflow,jendap/tensorflow,awni/tensorflow,Kongsea/tensorflow,Bulochkin/tensorflow_pack,tensorflow/tensorflow,asadziach/tensorflow,pcm17/tensorflow,laosiaudi/tensorflow,johndpope/tensorflow,DavidNorman/tensorflow,Bismarrck/tensorflow,allenlavoie/tensorflow,dyoung418/tensorflow,zycdragonball/tensorflow,HKUST-SING/tensorflow,with-git/tensorflow,naturali/tensorflow,tiagofrepereira2012/tensorflow,juharris/tensorflow,benoitsteiner/tensorflow-xsmm,mixturemodel-flow/tensorflow,thesuperzapper/tensorflow,alshedivat/tensorflow,mortada/tensorflow,benoitsteiner/tensorflow-opencl,tensorflow/tensorflow,MycChiu/tensorflow,jhseu/tensorflow,markslwong/tensorflow,taknevski/tensorflow-xsmm,Bulochkin/tensorflow_pack,MostafaGazar/tensorflow,eaplatanios/tensorflow,seaotterman/tensorflow,petewarden/tensorflow,sandeepgupta2k4/tensorflow,memo/tensorflow,Kongsea/tensorflow,mixturemodel-flow/tensorflow,vrv/tensorflow,rabipanda/tensorflow,renyi533/tensorflow,alisidd/tensorflow,frreiss/tensorflow-fred,tiagofrepereira2012/tensorflow,frreiss/tensorflow-fred,guschmue/tensorflow,adamtiger/tensorflow,ArtsiomCh/tensorflow,markslwong/tensorflow,alsrgv/tensorflow,MoamerEncsConcordiaCa/tensorflow,ishay2b/tensorflow,memo/tensorflow,Xeralux/tensorflow,dendisuhubdy/tensorflow,dhalleine/tensorflow,scenarios/tensorflow,drpngx/tensorflow,yaroslavvb/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-pywrap_saved_model,juharris/tensorflow,Xeralux/tensorflow,unsiloai/syntaxnet-ops-hack,guschmue/tensorflow,handroissuazo/tensorflow,TakayukiSakai/tensorflow,rabipanda/tensorflow,aselle/tensorflow,odejesush/tensorflow,vrv/tensorflow,jart/tensorflow,nolanliou/tensorflow,elingg/
tensorflow,eaplatanios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,LUTAN/tensorflow,manjunaths/tensorflow,aselle/tensorflow,Kongsea/tensorflow,laszlocsomor/tensorflow,HaebinShin/tensorflow,code-sauce/tensorflow,codrut3/tensorflow,gautam1858/tensorflow,AndreasMadsen/tensorflow,ppries/tensorflow,cxxgtxy/tensorflow,horance-liu/tensorflow,alisidd/tensorflow,nburn42/tensorflow,xzturn/tensorflow,arborh/tensorflow,HKUST-SING/tensorflow,rdipietro/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,jalexvig/tensorflow,bowang/tensorflow,dancingdan/tensorflow,sandeepgupta2k4/tensorflow,benoitsteiner/tensorflow-xsmm,martinwicke/tensorflow,karllessard/tensorflow,HKUST-SING/tensorflow,EvenStrangest/tensorflow,aam-at/tensorflow,sandeepdsouza93/TensorFlow-15712,hehongliang/tensorflow,tensorflow/tensorflow,HKUST-SING/tensorflow,petewarden/tensorflow,Intel-Corporation/tensorflow,scenarios/tensorflow,Intel-Corporation/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,anilmuthineni/tensorflow,aam-at/tensorflow,dongjoon-hyun/tensorflow,caisq/tensorflow,cg31/tensorflow,girving/tensorflow,gojira/tensorflow,EvenStrangest/tensorflow,sjperkins/tensorflow,RapidApplicationDevelopment/tensorflow,benoitsteiner/tensorflow-xsmm,peterbraden/tensorflow,pavelchristof/gomoku-ai,paolodedios/tensorflow,pavelchristof/gomoku-ai,jalexvig/tensorflow,admcrae/tensorflow,kobejean/tensorflow,johndpope/tensorflow,mixturemodel-flow/tensorflow,dyoung418/tensorflow,a-doumoulakis/tensorflow,alsrgv/tensorflow,adit-chandra/tensorflow,chemelnucfin/tensorflow,gibiansky/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,rdipietro/tensorflow,meteorcloudy/tensorflow,laszlocsomor/tensorflow,caisq/tensorflow,ArtsiomCh/tensorflow,ghchinoy/tensorflow,allenlavoie/tensorflow,ppwwyyxx/tensorflow,dongjoon-hyun/tensorflow,andrewcmyers/tensorflow,nburn42/tensorflow,gunan/tensorflow,renyi533/tensorflow,arborh/tensorflow,Mazecreator/tensorflow,lukeiwanski/tensorflow,calebfoss/tensorflow,nolanliou/tensorflow,theflofly/tensorflow,neilhan/tensorflow,drpngx/tensorflow,suiyuan2009/tensorflow,MycChiu/tensorflow,LUTAN/tensorflow,cxxgtxy/tensorflow,gibiansky/tensorflow,odejesush/tensorflow,Kongsea/tensorflow,girving/tensorflow,juharris/tensorflow,jhaux/tensorflow,av8ramit/tensorflow,JingJunYin/tensorflow,alshedivat/tensorflow,haeusser/tensorflow,JingJunYin/tensorflow,jart/tensorflow,kchodorow/tensorflow,admcrae/tensorflow,gautam1858/tensorflow,MostafaGazar/tensorflow,petewarden/tensorflow_makefile,hehongliang/tensorflow,code-sauce/tensorflow,zasdfgbnm/tensorflow,juharris/tensorflow,gautam1858/tensorflow,adamtiger/tensorflow,chemelnucfin/tensorflow,lakshayg/tensorflow,ville-k/tensorflow,benoitsteiner/tensorflow,eaplatanios/tensorflow,sandeepgupta2k4/tensorflow,zycdragonball/tensorflow,mavenlin/tensorflow,meteorcloudy/tensorflow,krikru/tensorflow-opencl,alsrgv/tensorflow,adit-chandra/tensorflow,asadziach/tensorflow,dhalleine/tensorflow,kamcpp/tensorflow,johndpope/tensorflow,peterbraden/tensorflow,SnakeJenny/TensorFlow,peterbraden/tensorflow,eaplatanios/tensorflow,kevin-coder/tensorflow-fork,strint/tensorflow,av8ramit/tensorflow,panmari/tensorflow,chris-chris/tensorflow,meteorcloudy/tensorflow,SnakeJenny/TensorFlow,jbedorf/tensorflow,tillahoffmann/tensorflow,with-git/tensorflow,seanli9jan/tensorflow,sarvex/tensorflow,thjashin/tensorflow,ibmsoe/tensorflow,MostafaGazar/tensorflow,rabipanda/tensorflow,unsiloai/syntaxnet-ops-hack,ghchinoy/tensorflow,kobejean/tensorflow,asadziach/tensorflow,AnishS
hah/tensorflow,ageron/tensorflow,JingJunYin/tensorflow,ninotoshi/tensorflow,Mazecreator/tensorflow,yanchen036/tensorflow,calebfoss/tensorflow,alistairlow/tensorflow,calebfoss/tensorflow,yaroslavvb/tensorflow,panmari/tensorflow,wchan/tensorflow,sandeepgupta2k4/tensorflow,markslwong/tensorflow,manjunaths/tensorflow,davidzchen/tensorflow,Bulochkin/tensorflow_pack,yanchen036/tensorflow,theflofly/tensorflow,peterbraden/tensorflow,eaplatanios/tensorflow,gnieboer/tensorflow,alheinecke/tensorflow-xsmm,girving/tensorflow,gibiansky/tensorflow,Mazecreator/tensorflow,ArtsiomCh/tensorflow,tongwang01/tensorflow,eadgarchen/tensorflow,girving/tensorflow,sarvex/tensorflow,tiagofrepereira2012/tensorflow,jeffzheng1/tensorflow,ishay2b/tensorflow,yongtang/tensorflow,kobejean/tensorflow,frreiss/tensorflow-fred,chemelnucfin/tensorflow,ZhangXinNan/tensorflow,HaebinShin/tensorflow,drpngx/tensorflow,alisidd/tensorflow,DCSaunders/tensorflow,neilhan/tensorflow,xodus7/tensorflow,annarev/tensorflow,juharris/tensorflow,naturali/tensorflow,hsaputra/tensorflow,adamtiger/tensorflow,apark263/tensorflow,Bulochkin/tensorflow_pack,tensorflow/tensorflow-experimental_link_static_libraries_once,code-sauce/tensorflow,mdrumond/tensorflow,MostafaGazar/tensorflow,JVillella/tensorflow,ychfan/tensorflow,thesuperzapper/tensorflow,horance-liu/tensorflow,TakayukiSakai/tensorflow,pcm17/tensorflow,horance-liu/tensorflow,maciekcc/tensorflow,asimshankar/tensorflow,unsiloai/syntaxnet-ops-hack,kchodorow/tensorflow,av8ramit/tensorflow,laszlocsomor/tensorflow,markslwong/tensorflow,SnakeJenny/TensorFlow,asadziach/tensorflow,sandeepgupta2k4/tensorflow,maciekcc/tensorflow,tensorflow/tensorflow,jendap/tensorflow,kamcpp/tensorflow,strint/tensorflow,davidzchen/tensorflow,johndpope/tensorflow,Kongsea/tensorflow,mavenlin/tensorflow,tensorflow/tensorflow,dendisuhubdy/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,mdrumond/tensorflow,ibmsoe/tensorflow,jhseu/tensorflow,tensorflow/skflow,jhseu/tensorflow,yaroslavvb/tensorflow,alivecor/tensorflow,gunan/tensorflow,TakayukiSakai/tensorflow,laosiaudi/tensorflow,kchodorow/tensorflow,dhalleine/tensorflow,scenarios/tensorflow,ran5515/DeepDecision,aselle/tensorflow,apark263/tensorflow,eadgarchen/tensorflow,AndreasMadsen/tensorflow,Bulochkin/tensorflow_pack,DCSaunders/tensorflow,awni/tensorflow,petewarden/tensorflow,petewarden/tensorflow,benoitsteiner/tensorflow-xsmm,alistairlow/tensorflow,lukeiwanski/tensorflow,brchiu/tensorflow,nanditav/15712-TensorFlow,asimshankar/tensorflow,caisq/tensorflow,Intel-tensorflow/tensorflow,aselle/tensorflow,benoitsteiner/tensorflow,ageron/tensorflow,mrry/tensorflow,panmari/tensorflow,laszlocsomor/tensorflow,nikste/tensorflow,scenarios/tensorflow,rabipanda/tensorflow,gibiansky/tensorflow,abhitopia/tensorflow,dendisuhubdy/tensorflow,aselle/tensorflow,jeffzheng1/tensorflow,freedomtan/tensorflow,pierreg/tensorflow,tornadozou/tensorflow,ivano666/tensorflow,suiyuan2009/tensorflow,zasdfgbnm/tensorflow,jwlawson/tensorflow,paolodedios/tensorflow,Mistobaan/tensorflow,chris-chris/tensorflow,llhe/tensorflow,wchan/tensorflow,manipopopo/tensorflow,rdipietro/tensorflow,petewarden/tensorflow,jart/tensorflow,theflofly/tensorflow,tillahoffmann/tensorflow,tensorflow/tensorflow,raymondxyang/tensorflow,guschmue/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,allenlavoie/tensorflow,benoitsteiner/tensorflow,yongtang/tensorflow,DavidNorman/tensorflow,abhitopia/tensorflow,hsaputra/tensorflow,MoamerEncsConcordiaCa/tensorflow,yaroslavvb/tensorflow,theflofly/tensorflow,zycdragonbal
l/tensorflow,xodus7/tensorflow,wangyum/tensorflow,markslwong/tensorflow,AndreasMadsen/tensorflow,yanchen036/tensorflow,memo/tensorflow,odejesush/tensorflow,MycChiu/tensorflow,brchiu/tensorflow,petewarden/tensorflow_makefile,aam-at/tensorflow,dansbecker/skflow,tntnatbry/tensorflow,llhe/tensorflow,sjperkins/tensorflow,alsrgv/tensorflow,ibmsoe/tensorflow,benoitsteiner/tensorflow,petewarden/tensorflow_makefile,rdipietro/tensorflow,manipopopo/tensorflow,gunan/tensorflow,asimshankar/tensorflow,kobejean/tensorflow,mortada/tensorflow,jart/tensorflow,aldian/tensorflow,jostep/tensorflow,thjashin/tensorflow,annarev/tensorflow,sandeepgupta2k4/tensorflow,sjperkins/tensorflow,Moriadry/tensorflow,freedomtan/tensorflow,snnn/tensorflow,Bismarrck/tensorflow,RapidApplicationDevelopment/tensorflow,andrewcmyers/tensorflow,wangyum/tensorflow,brchiu/tensorflow,DCSaunders/tensorflow,haeusser/tensorflow,eadgarchen/tensorflow,aselle/tensorflow,sandeepgupta2k4/tensorflow,renyi533/tensorflow,wchan/tensorflow,elingg/tensorflow,zycdragonball/tensorflow,EvenStrangest/tensorflow,nolanliou/tensorflow,martinwicke/tensorflow,caisq/tensorflow,DCSaunders/tensorflow,arborh/tensorflow,xzturn/tensorflow,thjashin/tensorflow,Bulochkin/tensorflow_pack,tntnatbry/tensorflow,av8ramit/tensorflow,tornadozou/tensorflow,Intel-tensorflow/tensorflow,arborh/tensorflow,wangyum/tensorflow,av8ramit/tensorflow,petewarden/tensorflow,snnn/tensorflow,cancan101/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,elingg/tensorflow,taknevski/tensorflow-xsmm,arborh/tensorflow,gojira/tensorflow,benoitsteiner/tensorflow-xsmm,alheinecke/tensorflow-xsmm,llhe/tensorflow,dongjoon-hyun/tensorflow,thjashin/tensorflow,krikru/tensorflow-opencl,brchiu/tensorflow,aam-at/tensorflow,mixturemodel-flow/tensorflow,cg31/tensorflow,ageron/tensorflow,MycChiu/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,nightjean/Deep-Learning,Mistobaan/tensorflow,ArtsiomCh/tensorflow,anilmuthineni/tensorflow,ville-k/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,dendisuhubdy/tensorflow,alistairlow/tensorflow,dancingdan/tensorflow,sjperkins/tensorflow,MycChiu/tensorflow,yongtang/tensorflow,eerwitt/tensorflow,haeusser/tensorflow,mrry/tensorflow,horance-liu/tensorflow,tornadozou/tensorflow,tomasreimers/tensorflow-emscripten,kamcpp/tensorflow,karllessard/tensorflow,dongjoon-hyun/tensorflow,xodus7/tensorflow,codrut3/tensorflow,HaebinShin/tensorflow,hehongliang/tensorflow,strint/tensorflow,code-sauce/tensorflow,tornadozou/tensorflow,RapidApplicationDevelopment/tensorflow,annarev/tensorflow,RapidApplicationDevelopment/tensorflow,aldian/tensorflow,cg31/tensorflow,davidzchen/tensorflow,alshedivat/tensorflow,gunan/tensorflow,alivecor/tensorflow,pierreg/tensorflow,mdrumond/tensorflow,mortada/tensorflow,dhalleine/tensorflow,jendap/tensorflow,thesuperzapper/tensorflow,wangyum/tensorflow,AndreasMadsen/tensorflow,scenarios/tensorflow,alshedivat/tensorflow,RapidApplicationDevelopment/tensorflow,pcm17/tensorflow,ibab/tensorflow,nanditav/15712-TensorFlow,lukeiwanski/tensorflow,allenlavoie/tensorflow,andrewcmyers/tensorflow,jhseu/tensorflow,anand-c-goog/tensorflow,rabipanda/tensorflow,dongjoon-hyun/tensorflow,markslwong/tensorflow,andrewcmyers/tensorflow,mdrumond/tensorflow,HaebinShin/tensorflow,jhaux/tensorflow,alisidd/tensorflow
--- +++ @@ -21,11 +21,21 @@
 class DropoutTest(tf.test.TestCase):

     def test_dropout_float(self):
-        with self.test_session():
+        with self.test_session() as session:
             x = tf.placeholder(tf.float32, [5, 5])
             y = ops.dropout(x, 0.5)
             probs = tf.get_collection(ops.DROPOUTS)
+            session.run(tf.initialize_all_variables())
             self.assertEqual(len(probs), 1)
+            self.assertEqual(session.run(probs[0]), 0.5)
+
+    def test_dropout_tensor(self):
+        with self.test_session():
+            x = tf.placeholder(tf.float32, [5, 5])
+            y = tf.get_variable("prob", [], initializer=tf.constant_initializer(0.5))
+            z = ops.dropout(x, y)
+            probs = tf.get_collection(ops.DROPOUTS)
+            self.assertEqual(probs, [y])


 if __name__ == '__main__':
0e83eff1b63eeb5203b4abc55e6278fc411567ae
setup.py
setup.py
from os.path import dirname, join

from setuptools import find_packages, setup

from channels_redis import __version__

# We use the README as the long_description
readme = open(join(dirname(__file__), "README.rst")).read()

crypto_requires = ["cryptography>=1.3.0"]

test_requires = crypto_requires + [
    "pytest>=3.0",
    "pytest-asyncio~=0.8",
    "async_generator~=1.8",
    "async-timeout~=2.0",
]


setup(
    name="channels_redis",
    version=__version__,
    url="http://github.com/django/channels_redis/",
    author="Django Software Foundation",
    author_email="foundation@djangoproject.com",
    description="Redis-backed ASGI channel layer implementation",
    long_description=readme,
    license="BSD",
    zip_safe=False,
    packages=find_packages(exclude=["tests"]),
    include_package_data=True,
    install_requires=[
        "aioredis~=1.0.0",
        "msgpack~=0.5.0",
        "asgiref~=2.0.1",
        "channels~=2.0.0",
    ],
    extras_require={
        "cryptography": crypto_requires,
        "tests": test_requires,
    },
)
from os.path import dirname, join

from setuptools import find_packages, setup

from channels_redis import __version__

# We use the README as the long_description
readme = open(join(dirname(__file__), "README.rst")).read()

crypto_requires = ["cryptography>=1.3.0"]

test_requires = crypto_requires + [
    "pytest>=3.0",
    "pytest-asyncio~=0.8",
    "async_generator~=1.8",
    "async-timeout~=2.0",
]


setup(
    name="channels_redis",
    version=__version__,
    url="http://github.com/django/channels_redis/",
    author="Django Software Foundation",
    author_email="foundation@djangoproject.com",
    description="Redis-backed ASGI channel layer implementation",
    long_description=readme,
    license="BSD",
    zip_safe=False,
    packages=find_packages(exclude=["tests"]),
    include_package_data=True,
    install_requires=[
        "aioredis~=1.0.0",
        "msgpack~=0.5.0",
        "asgiref~=2.1",
        "channels~=2.0",
    ],
    extras_require={
        "cryptography": crypto_requires,
        "tests": test_requires,
    },
)
Update dependencies to be more loose (and match Channels)
Update dependencies to be more loose (and match Channels)
Python
bsd-3-clause
django/asgi_redis
--- +++ @@ -32,8 +32,8 @@
     install_requires=[
         "aioredis~=1.0.0",
         "msgpack~=0.5.0",
-        "asgiref~=2.0.1",
-        "channels~=2.0.0",
+        "asgiref~=2.1",
+        "channels~=2.0",
     ],
     extras_require={
         "cryptography": crypto_requires,
0ba4eb026c34b85e0413bc83015398eeb33e6547
setup.py
setup.py
from setuptools import setup, find_packages


setup(name = 'thespian',
      version = '2.1.4',
      description = 'Python Actor concurrency library',
      author = 'Kevin Quick',
      author_email = 'kquick@godaddy.com',
      url = 'http://thespianpy.com',
      license = 'MIT',
      scripts = [ 'thespianShell.py' ],
      packages = find_packages(exclude=['thespian/test']),
      classifiers = [
          'Environment :: Library',
          'Intended Audience :: Developers',
          'Operating System :: MacOS',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
      ],
      long_description = '''

Thespian is a Python library providing a framework for developing
concurrent, distributed, fault tolerant applications.

Thespian is built on the Actor Model which allows applications to be
written as a group of independently executing but cooperating "Actors"
which communicate via messages. These Actors run within the Actor
System provided by the Thespian library.

* Concurrent
* Distributed
* Fault Tolerant
* Scalable
* Location independent

Actor programming is broadly applicable and it is ideally suited for
Cloud-based applications as well, where compute nodes are added and
removed from the environment dynamically.
'''
)
from setuptools import setup, find_packages


setup(name = 'thespian',
      version = '2.1.4',
      description = 'Python Actor concurrency library',
      author = 'Kevin Quick',
      author_email = 'kquick@godaddy.com',
      url = 'http://thespianpy.com',
      license = 'MIT',
      scripts = [ 'thespianShell.py' ],
      packages = find_packages(exclude=['thespian/test']),
      classifiers = [
          'Development Status :: 3 - Production/Stable',
          'Environment :: Library',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: MacOS',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: Implementation :: CPython',
          'Programming Language :: Python :: Implementation :: PyPy',
          'Topic :: System :: Distributed Computing',
      ],
      long_description = '''

Thespian is a Python library providing a framework for developing
concurrent, distributed, fault tolerant applications.

Thespian is built on the Actor Model which allows applications to be
written as a group of independently executing but cooperating "Actors"
which communicate via messages. These Actors run within the Actor
System provided by the Thespian library.

* Concurrent
* Distributed
* Fault Tolerant
* Scalable
* Location independent

Actor programming is broadly applicable and it is ideally suited for
Cloud-based applications as well, where compute nodes are added and
removed from the environment dynamically.
'''
)
Add more classifiers, including Python 3.3 and 3.4 declarations.
Add more classifiers, including Python 3.3 and 3.4 declarations.
Python
mit
kquick/Thespian,godaddy/Thespian,kquick/Thespian,godaddy/Thespian
--- +++ @@ -11,13 +11,20 @@
       scripts = [ 'thespianShell.py' ],
       packages = find_packages(exclude=['thespian/test']),
       classifiers = [
+          'Development Status :: 3 - Production/Stable',
           'Environment :: Library',
           'Intended Audience :: Developers',
+          'License :: OSI Approved :: MIT License',
           'Operating System :: MacOS',
           'Operating System :: Microsoft :: Windows',
           'Operating System :: POSIX :: Linux',
           'Programming Language :: Python :: 2.6',
           'Programming Language :: Python :: 2.7',
+          'Programming Language :: Python :: 3.3',
+          'Programming Language :: Python :: 3.4',
+          'Programming Language :: Python :: Implementation :: CPython',
+          'Programming Language :: Python :: Implementation :: PyPy',
+          'Topic :: System :: Distributed Computing',
       ],
       long_description = '''

da765334ffbc87c5cc20c8b3cdf7e31768c97bf7
setup.py
setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

from tornado_redis_sentinel import __version__

tests_require = [
    'mock',
    'nose',
    'coverage',
    'yanc',
    'preggy',
    'tox',
    'ipdb',
    'coveralls',
]

setup(
    name='tornado-redis-sentinel',
    version=__version__,
    description='Tornado redis library based in toredis that supports sentinel connections.',
    long_description='''
Tornado redis library based in toredis that supports sentinel connections.
''',
    keywords='tornado redis sentinel',
    author='Globo.com',
    author_email='timehome@corp.globo.com',
    url='',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'tornado>3.2,<4.0',
        'toredis',
        'six'
    ],
    extras_require={
        'tests': tests_require,
    }
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

from tornado_redis_sentinel import __version__

tests_require = [
    'mock',
    'nose',
    'coverage',
    'yanc',
    'preggy',
    'tox',
    'ipdb',
    'coveralls',
]

setup(
    name='tornado-redis-sentinel',
    version=__version__,
    description='Tornado redis library based in toredis that supports sentinel connections.',
    long_description='''
Tornado redis library based in toredis that supports sentinel connections.
''',
    keywords='tornado redis sentinel',
    author='Globo.com',
    author_email='timehome@corp.globo.com',
    url='',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'tornado>3.2',
        'toredis',
        'six'
    ],
    extras_require={
        'tests': tests_require,
    }
)
Remove upper limit for tornado's version
Remove upper limit for tornado's version

My use case seems to work with tornado 4.0.2.
Python
mit
ms7s/tornado-redis-sentinel
--- +++ @@ -41,7 +41,7 @@
     packages=find_packages(),
     include_package_data=True,
     install_requires=[
-        'tornado>3.2,<4.0',
+        'tornado>3.2',
         'toredis',
         'six'
     ],
565c66982965c5abbc88083fa505efb4ce8a92c0
setup.py
setup.py
#
# This file is part of gruvi. Gruvi is free software available under the terms
# of the MIT license. See the file "LICENSE" that was provided together with
# this source file for the licensing terms.
#
# Copyright (c) 2012 the gruvi authors. See the file "AUTHORS" for a complete
# list.

from setuptools import setup


version_info = {
    'name': 'gruvi',
    'version': '0.1',
    'description': 'Synchronous evented IO',
    'author': 'Geert Jansen',
    'author_email': 'geertj@gmail.com',
    'url': 'https://github.com/geertj/gruvi',
    'license': 'MIT',
    'classifiers': [
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3'
    ]
}


setup(
    package_dir = { '': 'lib' },
    packages = [ 'gruvi', 'gruvi.test' ],
    requires = ['greenlet'],
    install_requires = ['setuptools'],
    **version_info
)
#
# This file is part of gruvi. Gruvi is free software available under the terms
# of the MIT license. See the file "LICENSE" that was provided together with
# this source file for the licensing terms.
#
# Copyright (c) 2012 the gruvi authors. See the file "AUTHORS" for a complete
# list.

from setuptools import setup


version_info = {
    'name': 'gruvi',
    'version': '0.1',
    'description': 'Synchronous evented IO',
    'author': 'Geert Jansen',
    'author_email': 'geertj@gmail.com',
    'url': 'https://github.com/geertj/gruvi',
    'license': 'MIT',
    'classifiers': [
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3'
    ]
}


setup(
    package_dir = { '': 'lib' },
    packages = [ 'gruvi', 'gruvi.test' ],
    requires = ['greenlet'],
    install_requires = ['setuptools'],
    test_suite = 'nose.collector',
    **version_info
)
Use nose as the test driver.
Use nose as the test driver.
Python
mit
swegener/gruvi,geertj/gruvi,geertj/gruvi,swegener/gruvi
--- +++ @@ -33,5 +33,6 @@
     packages = [ 'gruvi', 'gruvi.test' ],
     requires = ['greenlet'],
     install_requires = ['setuptools'],
+    test_suite = 'nose.collector',
     **version_info
 )
d4feda3b91585576708f56ebc8f2c8592f877e2c
setup.py
setup.py
#!/usr/bin/env python

from setuptools import setup

with open('README.rst') as file:
    long_description = file.read()

setup(name='parmap',
      version='1.5.1.9000',
      description=('map and starmap implementations passing additional '
                   'arguments and parallelizing if possible'),
      long_description=long_description,
      author='Sergio Oller',
      license='APACHE-2.0',
      author_email='sergioller@gmail.com',
      url='https://github.com/zeehio/parmap',
      packages=['parmap'],
      extras_require = {
          'progress_bar': ["tqdm>=4.8.4"],
      },
      test_suite = "test_parmap",
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6'
      ],
      )
#!/usr/bin/env python

from setuptools import setup

with open('README.rst') as file:
    long_description = file.read()

setup(name='parmap',
      version='1.5.1.9000',
      description=('map and starmap implementations passing additional '
                   'arguments and parallelizing if possible'),
      long_description=long_description,
      long_description_content_type = "text/x-rst",
      author='Sergio Oller',
      license='APACHE-2.0',
      author_email='sergioller@gmail.com',
      url='https://github.com/zeehio/parmap',
      packages=['parmap'],
      extras_require = {
          'progress_bar': ["tqdm>=4.8.4"],
      },
      test_suite = "test_parmap",
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6'
      ],
      )
Add long description content type
Add long description content type
Python
apache-2.0
zeehio/parmap
--- +++ @@ -10,6 +10,7 @@
       description=('map and starmap implementations passing additional '
                    'arguments and parallelizing if possible'),
       long_description=long_description,
+      long_description_content_type = "text/x-rst",
       author='Sergio Oller',
       license='APACHE-2.0',
       author_email='sergioller@gmail.com',
29600a6a8e8fa17e1c5b9f53dde57167450cbf4d
setup.py
setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from distutils.core import setup

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-zxcvbn-password',
    version='1.0.3.0',
    packages=['zxcvbn_password'],
    package_data={'': '*.js'},
    include_package_data=True,
    license='BSD License',
    author='Timothée Mazzucotelli',
    author_email='timothee.mazzucotelli@gmail.com',
    url='https://github.com/Pawamoy/django-zxcvbn-password',
    # download_url = 'https://github.com/Pawamoy/django-zxcvbn-password/tarball/1.0.2',
    keywords="password validation front back zxcvbn confirmation field",
    description="A front-end and back-end password validation field using ZXCVBN.",
    classifiers=[
        "Environment :: Web Environment",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Programming Language :: Python",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: BSD License",
    ]
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from distutils.core import setup

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-zxcvbn-password',
    version='1.0.3.0',
    packages=['zxcvbn_password'],
    package_data={'': ['*.js']},
    include_package_data=True,
    license='BSD License',
    author='Timothée Mazzucotelli',
    author_email='timothee.mazzucotelli@gmail.com',
    url='https://github.com/Pawamoy/django-zxcvbn-password',
    # download_url = 'https://github.com/Pawamoy/django-zxcvbn-password/tarball/1.0.2',
    keywords="password validation front back zxcvbn confirmation field",
    description="A front-end and back-end password validation field using ZXCVBN.",
    classifiers=[
        "Environment :: Web Environment",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Programming Language :: Python",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: BSD License",
    ]
)
Fix package data (str to [])
Fix package data (str to [])
Python
isc
Pawamoy/django-zxcvbn-password,Pawamoy/django-zxcvbn-password,Pawamoy/django-zxcvbn-password
--- +++ @@ -11,7 +11,7 @@
     name='django-zxcvbn-password',
     version='1.0.3.0',
     packages=['zxcvbn_password'],
-    package_data={'': '*.js'},
+    package_data={'': ['*.js']},
     include_package_data=True,
     license='BSD License',
9260852d12ca686843ba30f3786b542e6418e4ff
setup.py
setup.py
import os
import sys
import imp

from setuptools import find_packages
try:
    from restricted_pkg import setup
except:
    # allow falling back to setuptools only if
    # we are not trying to upload
    if 'upload' in sys.argv:
        raise ImportError('restricted_pkg is required to upload, first do pip install restricted_pkg')
    from setuptools import setup

requirements = imp.load_source('requirements', os.path.realpath('requirements.py'))

setup(
    name='dusty',
    version='0.0.1',
    description='Docker-based development environment manager',
    url='https://github.com/gamechanger/dusty',
    private_repository='gamechanger',
    author='GameChanger',
    author_email='travis@gamechanger.io',
    packages=find_packages(),
    install_requires=requirements.install_requires,
    tests_require=requirements.test_requires,
    test_suite="nose.collector",
    entry_points={'console_scripts': ['dustyd = dusty.daemon:main',
                                      'dusty = dusty.cli.__init__:main']},
    zip_safe=False
)
### GAMECHANGER_CI_PREVENT_BUILD
import os
import sys
import imp

from setuptools import find_packages
try:
    from restricted_pkg import setup
except:
    # allow falling back to setuptools only if
    # we are not trying to upload
    if 'upload' in sys.argv:
        raise ImportError('restricted_pkg is required to upload, first do pip install restricted_pkg')
    from setuptools import setup

requirements = imp.load_source('requirements', os.path.realpath('requirements.py'))

setup(
    name='dusty',
    version='0.0.1',
    description='Docker-based development environment manager',
    url='https://github.com/gamechanger/dusty',
    private_repository='gamechanger',
    author='GameChanger',
    author_email='travis@gamechanger.io',
    packages=find_packages(),
    install_requires=requirements.install_requires,
    tests_require=requirements.test_requires,
    test_suite="nose.collector",
    entry_points={'console_scripts': ['dustyd = dusty.daemon:main',
                                      'dusty = dusty.cli.__init__:main']},
    zip_safe=False
)
Add magic comment to stop our CI from building a package
Add magic comment to stop our CI from building a package
Python
mit
gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty
--- +++ @@ -1,3 +1,4 @@
+### GAMECHANGER_CI_PREVENT_BUILD
 import os
 import sys
 import imp
7db466f94fbca57482947fa982f56ae77b11f9fa
setup.py
setup.py
#!/usr/bin/env python
"""
Setup file for toolaudit.
"""

import codecs
import os

from setuptools import setup

here = os.path.abspath(os.path.dirname(__file__))


def read(filename):
    """
    Read the contents of the files listed in filenames and return it as a
    string.
    """
    return codecs.open(os.path.join(here, filename), 'r').read()


setup(
    name='toolaudit',
    version='0.0.3',
    packages=['toolaudit'],
    install_requires=[
        "argparse>=1.3.0",
        "PyYAML>=3.11",
        "six>=1.9.0"
    ],
    entry_points={
        'console_scripts': [
            'toolaudit = toolaudit:main',
        ],
    },
    author='Jon Stutters',
    author_email='j.stutters@ucl.ac.uk',
    description='Report on the tools used in your software pipeline.',
    long_description=read('README.rst'),
    url='https://github.com/jstutters/toolaudit',
    include_package_data=True,
    license='MIT',
    classifiers=[
        'Programming Language :: Python',
        'Development Status :: 3 - Alpha',
        'Natural Language :: English',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Topic :: System :: Systems Administration'
    ]
)
#!/usr/bin/env python
"""
Setup file for toolaudit.
"""

import codecs
import os

from setuptools import setup

here = os.path.abspath(os.path.dirname(__file__))


def read(filename):
    """
    Read the contents of the files listed in filenames and return it as a
    string.
    """
    return codecs.open(os.path.join(here, filename), 'r').read()


setup(
    name='toolaudit',
    version='0.0.3',
    packages=['toolaudit'],
    zip_safe=False,
    install_requires=[
        "argparse>=1.3.0",
        "PyYAML>=3.11",
        "six>=1.9.0"
    ],
    entry_points={
        'console_scripts': [
            'toolaudit = toolaudit:main',
        ],
    },
    author='Jon Stutters',
    author_email='j.stutters@ucl.ac.uk',
    description='Report on the tools used in your software pipeline.',
    long_description=read('README.rst'),
    url='https://github.com/jstutters/toolaudit',
    include_package_data=True,
    license='MIT',
    classifiers=[
        'Programming Language :: Python',
        'Development Status :: 3 - Alpha',
        'Natural Language :: English',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Topic :: System :: Systems Administration'
    ]
)
Set zip_safe to False to eliminate a variable getting the tests to run
Set zip_safe to False to eliminate a variable getting the tests to run
Python
mit
jstutters/toolaudit
--- +++ @@ -23,6 +23,7 @@
     name='toolaudit',
     version='0.0.3',
     packages=['toolaudit'],
+    zip_safe=False,
     install_requires=[
         "argparse>=1.3.0",
         "PyYAML>=3.11",
ad855c1466cfa374aad397fcc3f0e72551e4133f
setup.py
setup.py
from setuptools import setup

setup(name = 'OWSLib',
      version = '0.1.0',
      description = 'OGC Web Service utility library',
      license = 'GPL',
      keywords = 'gis ogc ows wfs wms capabilities metadata',
      author = 'Sean Gillies',
      author_email = 'sgillies@frii.com',
      maintainer = 'Sean Gillies',
      maintainer_email = 'sgillies@frii.com',
      url = 'http://trac.gispython.org/projects/PCL/wiki/OwsLib',
      packages = ['owslib'],
      classifiers = [
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Scientific/Engineering :: GIS',
      ],
      )
from setuptools import setup

setup(name = 'OWSLib',
      version = '0.2.0',
      description = 'OGC Web Service utility library',
      license = 'BSD',
      keywords = 'gis ogc ows wfs wms capabilities metadata',
      author = 'Sean Gillies',
      author_email = 'sgillies@frii.com',
      maintainer = 'Sean Gillies',
      maintainer_email = 'sgillies@frii.com',
      url = 'http://trac.gispython.org/projects/PCL/wiki/OwsLib',
      packages = ['owslib'],
      classifiers = [
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Scientific/Engineering :: GIS',
      ],
      )
Change version and license for 0.2
Change version and license for 0.2
Python
bsd-3-clause
dblodgett-usgs/OWSLib,geographika/OWSLib,bird-house/OWSLib,ocefpaf/OWSLib,daf/OWSLib,kalxas/OWSLib,tomkralidis/OWSLib,jaygoldfinch/OWSLib,mbertrand/OWSLib,KeyproOy/OWSLib,Jenselme/OWSLib,menegon/OWSLib,jachym/OWSLib,kwilcox/OWSLib,jaygoldfinch/OWSLib,JuergenWeichand/OWSLib,b-cube/OWSLib,gfusca/OWSLib,robmcmullen/OWSLib,daf/OWSLib,QuLogic/OWSLib,datagovuk/OWSLib,datagovuk/OWSLib,daf/OWSLib,datagovuk/OWSLib,geopython/OWSLib
--- +++ @@ -2,9 +2,9 @@
 from setuptools import setup

 setup(name = 'OWSLib',
-      version = '0.1.0',
+      version = '0.2.0',
       description = 'OGC Web Service utility library',
-      license = 'GPL',
+      license = 'BSD',
       keywords = 'gis ogc ows wfs wms capabilities metadata',
       author = 'Sean Gillies',
       author_email = 'sgillies@frii.com',
@@ -16,7 +16,7 @@
           'Development Status :: 3 - Alpha',
           'Intended Audience :: Developers',
           'Intended Audience :: Science/Research',
-          'License :: OSI Approved :: GNU General Public License (GPL)',
+          'License :: OSI Approved :: BSD License',
           'Operating System :: OS Independent',
           'Programming Language :: Python',
           'Topic :: Scientific/Engineering :: GIS',
5b2691dcc89fb9d9b7b1db77e9512976950adf45
setup.py
setup.py
"""Package Keysmith.""" import codecs import os.path import setuptools # type: ignore import keysmith # This project only depends on the standard library. def read(*parts): """Read a file in this repository.""" here = os.path.abspath(os.path.dirname(__file__)) with codecs.open(os.path.join(here, *parts), 'r') as file_: return file_.read() ENTRY_POINTS = { 'console_scripts': [ '{name}={module}:{function}'.format( name=keysmith.CONSOLE_SCRIPT, module=keysmith.__name__, function=keysmith.main.__name__, ), ], } if __name__ == '__main__': setuptools.setup( name=keysmith.__name__, version=keysmith.__version__, description='Passphrase Generator', long_description=read('README.rst'), author='David Tucker', author_email='david@tucker.name', license='BSD 3-Clause License', url='https://github.com/dmtucker/keysmith', python_requires='~=3.5', py_modules=[keysmith.__name__], entry_points=ENTRY_POINTS, keywords='diceware generator keygen passphrase password', classifiers=['Development Status :: 5 - Production/Stable'], )
"""Package Keysmith.""" import codecs import os.path import setuptools # type: ignore import keysmith # This project only depends on the standard library. def read(*parts): """Read a file in this repository.""" here = os.path.abspath(os.path.dirname(__file__)) with codecs.open(os.path.join(here, *parts), 'r') as file_: return file_.read() ENTRY_POINTS = { 'console_scripts': [ '{name}={module}:{function}'.format( name=keysmith.CONSOLE_SCRIPT, module=keysmith.__name__, function=keysmith.main.__name__, ), ], } if __name__ == '__main__': setuptools.setup( name=keysmith.__name__, version=keysmith.__version__, description='Passphrase Generator', long_description=read('README.rst'), author='David Tucker', author_email='david@tucker.name', license='BSD 3-Clause License', url='https://github.com/dmtucker/keysmith', python_requires='~=3.5', py_modules=[keysmith.__name__], entry_points=ENTRY_POINTS, keywords='diceware generator keygen passphrase password', classifiers=['Development Status :: 7 - Inactive'], )
Update the project Development Status
Update the project Development Status
Python
bsd-3-clause
dmtucker/keysmith
--- +++ @@ -41,5 +41,5 @@
         py_modules=[keysmith.__name__],
         entry_points=ENTRY_POINTS,
         keywords='diceware generator keygen passphrase password',
-        classifiers=['Development Status :: 5 - Production/Stable'],
+        classifiers=['Development Status :: 7 - Inactive'],
     )
05ed915cab57ec8014a4d4636687132694171218
setup.py
setup.py
from setuptools import setup, find_packages

import populous

requirements = [
    "click",
]

setup(
    name="populous",
    version=populous.__version__,
    url=populous.__url__,
    description=populous.__doc__,
    author=populous.__author__,
    license=populous.__license__,
    long_description="TODO",
    packages=find_packages(),
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            'populous = populous.__main__:cli'
        ]
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Topic :: Utilities",
    ],
    keywords='populous populate database',
)
from setuptools import setup, find_packages

import populous

requirements = [
    "click",
    "cached-property",
]

setup(
    name="populous",
    version=populous.__version__,
    url=populous.__url__,
    description=populous.__doc__,
    author=populous.__author__,
    license=populous.__license__,
    long_description="TODO",
    packages=find_packages(),
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            'populous = populous.__main__:cli'
        ]
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Topic :: Utilities",
    ],
    keywords='populous populate database',
)
Add cached-property to the dependencies
Add cached-property to the dependencies
Python
mit
novafloss/populous
--- +++ @@ -4,6 +4,7 @@
 
 requirements = [
     "click",
+    "cached-property",
 ]
 
 setup(
6edb67deeb3f19b3b5d24e262afc266ce2cb7600
setup.py
setup.py
from setuptools import setup

import io


# Take from Jeff Knupp's excellent article:
# http://www.jeffknupp.com/blog/2013/08/16/open-sourcing-a-python-project-the-right-way/
def read(*filenames, **kwargs):
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')
    buf = []
    for filename in filenames:
        with io.open(filename, encoding=encoding) as f:
            buf.append(f.read())
    return sep.join(buf)


setup(name='dayonetools',
      version='1.0.0',
      description='Tools to import multiple services into Day One Journal',
      long_description=read('README.md'),
      package_data={'': ['README.md']},
      license='MIT',
      author='Luke Lee',
      author_email='durdenmisc@gmail.com',
      url='https://github.com/durden/dayonetools',
      packages=['dayonetools', 'dayonetools.services'],
      install_requires=['python-dateutil>=2.2'],
      platforms='any',
      classifiers= [
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: End Users/Desktop',
          'Natural Language :: English',
          'Operating System :: OS Independent',
          'Topic :: System :: Archiving'],
      entry_points={
          "console_scripts": [
              "dayonetools = dayonetools.main:main",
          ]
      },
      )
from setuptools import setup

import io


# Take from Jeff Knupp's excellent article:
# http://www.jeffknupp.com/blog/2013/08/16/open-sourcing-a-python-project-the-right-way/
def read(*filenames, **kwargs):
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')
    buf = []
    for filename in filenames:
        with io.open(filename, encoding=encoding) as f:
            buf.append(f.read())
    return sep.join(buf)


setup(name='dayonetools',
      version='1.0.0',
      description='Tools to import multiple services into Day One Journal',
      long_description=read('README.md'),
      package_data={'': ['README.md']},
      license='MIT',
      author='Luke Lee',
      author_email='durdenmisc@gmail.com',
      url='https://github.com/durden/dayonetools',
      packages=['dayonetools', 'dayonetools.services'],
      install_requires=['python-dateutil>=2.2', 'pytz==2014.4'],
      platforms='any',
      classifiers= [
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: End Users/Desktop',
          'Natural Language :: English',
          'Operating System :: OS Independent',
          'Topic :: System :: Archiving'],
      entry_points={
          "console_scripts": [
              "dayonetools = dayonetools.main:main",
          ]
      },
      )
Add pytz to installation requirements, needed for pedometerpp
Add pytz to installation requirements, needed for pedometerpp
Python
mit
durden/dayonetools
--- +++ @@ -26,7 +26,7 @@
       author_email='durdenmisc@gmail.com',
       url='https://github.com/durden/dayonetools',
       packages=['dayonetools', 'dayonetools.services'],
-      install_requires=['python-dateutil>=2.2'],
+      install_requires=['python-dateutil>=2.2', 'pytz==2014.4'],
       platforms='any',
       classifiers= [
           'Development Status :: 4 - Beta',
edd54a7e34fdce5268ef1d15bd04b09e724a8508
setup.py
setup.py
#!/usr/bin/env python
# coding=utf-8

from setuptools import setup


# Get long description (used on PyPI project page)
def get_long_description():
    try:
        # Use pandoc to create reStructuredText README if possible
        import pypandoc
        return pypandoc.convert('README.md', 'rst')
    except:
        # Otherwise, default to using Markdown README
        with open('README.md', 'r') as readme_file:
            return readme_file.read()


setup(
    name='alfred-workflow-packager',
    version='0.12.0',
    description='A CLI utility for packaging and exporting Alfred workflows',
    long_description=get_long_description(),
    url='https://github.com/caleb531/alfred-workflow-packager',
    author='Caleb Evans',
    author_email='caleb@calebevans.me',
    license='MIT',
    keywords='alfred workflow package export',
    packages=['awp'],
    package_data={
        'awp': ['data/config-schema.json']
    },
    install_requires=[
        'biplist >= 1, < 2',
        'jsonschema >= 2, < 3'
    ],
    entry_points={
        'console_scripts': [
            'alfred-workflow-packager=awp.main:main',
            'workflow-packager=awp.main:main',
            'awp=awp.main:main'
        ]
    }
)
#!/usr/bin/env python
# coding=utf-8

from setuptools import setup


# Get long description (used on PyPI project page)
def get_long_description():
    try:
        # Use pandoc to create reStructuredText README if possible
        import pypandoc
        return pypandoc.convert('README.md', 'rst')
    except:
        # Otherwise, default to using Markdown README
        with open('README.md', 'r') as readme_file:
            return readme_file.read()


setup(
    name='alfred-workflow-packager',
    version='0.12.0',
    description='A CLI utility for packaging and exporting Alfred workflows',
    long_description=get_long_description(),
    url='https://github.com/caleb531/alfred-workflow-packager',
    author='Caleb Evans',
    author_email='caleb@calebevans.me',
    license='MIT',
    keywords='alfred workflow package export',
    packages=['awp'],
    package_data={
        'awp': ['data/config-schema.json']
    },
    install_requires=[
        'biplist >= 1, < 2',
        'jsonschema >= 2, < 3'
    ],
    entry_points={
        'console_scripts': [
            'awp=awp.main:main'
        ]
    }
)
Remove long utility command names in favor of awp
Remove long utility command names in favor of awp
Python
mit
caleb531/alfred-workflow-packager
--- +++ @@ -36,8 +36,6 @@
     ],
     entry_points={
         'console_scripts': [
-            'alfred-workflow-packager=awp.main:main',
-            'workflow-packager=awp.main:main',
             'awp=awp.main:main'
         ]
     }
be8ea4018641cf79a347f2da04b078e725b0801e
setup.py
setup.py
#!/usr/bin/env python
from setuptools import setup, find_packages

import sys


with open('README.md') as f:
    readme = f.read()

install_requires = [
    'seria',
    'python-gnupg'
]

setup(
    name='figgypy',
    version='0.3.dev',
    description='Simple configuration tool. Get config from yaml, json, or xml.',
    long_description=readme,
    author='Herkermer Sherwood',
    author_email='theherk@gmail.com',
    url='https://github.com/theherk/figgypy',
    download_url='https://github.com/theherk/figgypy/archive/0.3.dev.zip',
    packages=find_packages(),
    platforms=['all'],
    license='MIT',
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: Other/Proprietary License',
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Utilities',
    ],
)
#!/usr/bin/env python
from setuptools import setup, find_packages

import sys


with open('README.md') as f:
    readme = f.read()

install_requires = [
    'gnupg>=2.0.2',
    'seria',
    'python-gnupg'
]

setup(
    name='figgypy',
    version='0.3.dev',
    description='Simple configuration tool. Get config from yaml, json, or xml.',
    long_description=readme,
    author='Herkermer Sherwood',
    author_email='theherk@gmail.com',
    url='https://github.com/theherk/figgypy',
    download_url='https://github.com/theherk/figgypy/archive/0.3.dev.zip',
    packages=find_packages(),
    platforms=['all'],
    license='MIT',
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: Other/Proprietary License',
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Utilities',
    ],
)
Add requirement for fork version of gnupg
Add requirement for fork version of gnupg
Python
mit
theherk/figgypy
--- +++ @@ -8,6 +8,7 @@
     readme = f.read()
 
 install_requires = [
+    'gnupg>=2.0.2',
     'seria',
     'python-gnupg'
 ]
d8de5888e4519ba32d63d5f31aa4806d81c37399
setup.py
setup.py
#!/usr/bin/env python
from setuptools import setup

VERSION = '0.0.1'


test_requirements = [
    'nose>=1.3.7',
    'responses>=0.5.1'
]

install_requires = test_requirements + [
    'requests>=2.4.3',
]

setup(
    name="mondo",
    version=VERSION,
    description="Mondo Banking API Client",
    author=', '.join((
        'Tito Miguel Costa',
        'Simon Vans-Colina <simon@simon.vc>',
    )),
    url="https://github.com/simonvc/mxondo-python",
    packages=["mondo"],
    tests_require=test_requirements,
    install_requires=install_requires,
    license="MIT",
)
#!/usr/bin/env python
from setuptools import setup

VERSION = '0.0.1'


test_requirements = [
    'nose>=1.3.4',
    'responses>=0.5.1'
]

install_requires = test_requirements + [
    'requests>=2.4.3',
]

setup(
    name="mondo",
    version=VERSION,
    description="Mondo Banking API Client",
    author=', '.join((
        'Tito Miguel Costa',
        'Simon Vans-Colina <simon@simon.vc>',
    )),
    url="https://github.com/simonvc/mxondo-python",
    packages=["mondo"],
    tests_require=test_requirements,
    install_requires=install_requires,
    license="MIT",
)
Update nose version for Python2
Update nose version for Python2
Python
mit
gabalese/mondo-python
--- +++ @@ -5,7 +5,7 @@
 
 
 test_requirements = [
-    'nose>=1.3.7',
+    'nose>=1.3.4',
     'responses>=0.5.1'
 ]
bc687afb9421d61afe95f1c3a444e6c91971a113
setup.py
setup.py
import os.path

# Install setuptools if not installed.
try:
    import setuptools
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()

from setuptools import setup, find_packages


# read README as the long description
readme = 'README' if os.path.exists('README') else 'README.md'
with open(readme, 'r') as f:
    long_description = f.read()

setup(
    name='spandex',
    version='0.1dev',
    description='Spatial Analysis and Data Exploration',
    long_description=long_description,
    author='Synthicity',
    author_email='ejanowicz@synthicity.com',
    url='https://github.com/synthicity/spandex',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
    ],
    packages=find_packages(exclude=['*.tests']),
    install_requires=[
        'GeoAlchemy2>=0.2.1',  # Bug fix for schemas other than public.
        'pandas>=0.15.0',
        'psycopg2>=2.5',  # connection and cursor context managers.
        'six>=1.4',  # Mapping for urllib.
        'SQLAlchemy>=0.8'  # GeoAlchemy2 support.
    ],
    extras_require={
        'prj': ['GDAL>=1.7'],  # Python 3 support.
        'rastertoolz': ['numpy>=1.8.0', 'rasterio>=0.12', 'rasterstats>=0.4',
                        'shapely>=1.3.2']
    }
)
import os.path

# Install setuptools if not installed.
try:
    import setuptools
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()

from setuptools import setup, find_packages


# read README as the long description
readme = 'README' if os.path.exists('README') else 'README.md'
with open(readme, 'r') as f:
    long_description = f.read()

setup(
    name='spandex',
    version='0.1dev',
    description='Spatial Analysis and Data Exploration',
    long_description=long_description,
    author='Synthicity',
    author_email='ejanowicz@synthicity.com',
    url='https://github.com/synthicity/spandex',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.7',
    ],
    packages=find_packages(exclude=['*.tests']),
    install_requires=[
        'GeoAlchemy2>=0.2.1',  # Bug fix for schemas other than public.
        'pandas>=0.15.0',  # pandas.Index.difference.
        'psycopg2>=2.5',  # connection and cursor context managers.
        'six>=1.4',  # Mapping for urllib.
        'SQLAlchemy>=0.8'  # GeoAlchemy2 support.
    ],
    extras_require={
        'prj': ['GDAL>=1.7'],  # Python 3 support.
        'rastertoolz': ['numpy>=1.8.0', 'rasterio>=0.12', 'rasterstats>=0.4',
                        'shapely>=1.3.2']
    }
)
Add comment for updated Pandas requirement
Add comment for updated Pandas requirement
Python
bsd-3-clause
UDST/spandex,SANDAG/spandex
--- +++ @@ -30,7 +30,7 @@
     packages=find_packages(exclude=['*.tests']),
     install_requires=[
         'GeoAlchemy2>=0.2.1',  # Bug fix for schemas other than public.
-        'pandas>=0.15.0',
+        'pandas>=0.15.0',  # pandas.Index.difference.
         'psycopg2>=2.5',  # connection and cursor context managers.
         'six>=1.4',  # Mapping for urllib.
         'SQLAlchemy>=0.8'  # GeoAlchemy2 support.
61f9f96645daaa42369b86f2af9404f3d228b22e
setup.py
setup.py
#!/usr/bin/env python3

from setuptools import setup
from ipyrmd import __version__

setup(name="ipyrmd",
      version=__version__,
      description="Convert between IPython/Jupyter notebooks and RMarkdown",
      author="Gordon Ball",
      author_email="gordon@chronitis.net",
      url="https://github.com/chronitis/ipyrmd",
      packages=["ipyrmd"],
      license="MIT",
      install_requires=["nbformat", "pyyaml"],
      scripts=["scripts/ipyrmd"],
      keywords="ipython jupyter irkernel rmarkdown ipynb",
      classifiers=[
          "Development Status :: 4 - Beta",
          "Intended Audience :: Developers",
          "Intended Audience :: Science/Research"
          "License :: OSI Approved :: MIT License",
          "Programming Language :: Python :: 3",
          "Programming Lanuage :: R",
          "Environment :: Console",
          "Framework :: IPython",
          "Framework :: Jupyter",
          "Topic :: Scientific/Engineering",
          "Topic :: Utilities"
      ])
#!/usr/bin/env python3

from setuptools import setup
from ipyrmd import __version__

setup(name="ipyrmd",
      version=__version__,
      description="Convert between IPython/Jupyter notebooks and RMarkdown",
      author="Gordon Ball",
      author_email="gordon@chronitis.net",
      url="https://github.com/chronitis/ipyrmd",
      packages=["ipyrmd"],
      license="MIT",
      install_requires=["nbformat", "pyyaml"],
      scripts=["scripts/ipyrmd"],
      keywords="ipython jupyter irkernel rmarkdown ipynb",
      classifiers=[
          "Development Status :: 4 - Beta",
          "Intended Audience :: Developers",
          "Intended Audience :: Science/Research",
          "License :: OSI Approved :: MIT License",
          "Programming Language :: Python :: 3",
          "Programming Lanuage :: R",
          "Environment :: Console",
          "Framework :: IPython",
          "Framework :: Jupyter",
          "Topic :: Scientific/Engineering",
          "Topic :: Utilities"
      ])
Add missing comma in classifier list
Add missing comma in classifier list
Python
mit
chronitis/ipyrmd
--- +++ @@ -17,7 +17,7 @@
       classifiers=[
           "Development Status :: 4 - Beta",
           "Intended Audience :: Developers",
-          "Intended Audience :: Science/Research"
+          "Intended Audience :: Science/Research",
           "License :: OSI Approved :: MIT License",
           "Programming Language :: Python :: 3",
           "Programming Lanuage :: R",
5c0c0a86827ec6d2b8ece7cffddec3afbfcf72b6
setup.py
setup.py
import os
from setuptools import setup, find_packages

__version__ = '0.1'

HERE = os.path.dirname(__file__)

try:
    long_description = open(os.path.join(HERE, 'README.rst')).read()
except:
    long_description = None

setup(
    name='rubberjack-cli',
    version=__version__,
    packages=find_packages(exclude=['test*']),
    include_package_data=True,
    zip_safe=True,

    # metadata for upload to PyPI
    author='LaterPay GmbH',
    url='https://github.com/laterpay/rubberjack-cli',
    description='RubberJack manages (AWS) Elastic Beanstalks',
    long_description=long_description,
    license='MIT',
    keywords='aws',

    install_requires=[
        'boto',
        'click',
    ],

    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
    ],

    entry_points={
        'console_scripts': [
            'rubberjack=rubberjackcli.click:rubberjack',
        ],
    },
)
import os
from setuptools import setup, find_packages

__version__ = '0.1'

HERE = os.path.dirname(__file__)


def readme():
    with open('README.rst') as f:
        return f.read()


setup(
    name='rubberjack-cli',
    version=__version__,
    packages=find_packages(exclude=['test*']),
    include_package_data=True,
    zip_safe=True,

    # metadata for upload to PyPI
    author='LaterPay GmbH',
    url='https://github.com/laterpay/rubberjack-cli',
    description='RubberJack manages (AWS) Elastic Beanstalks',
    long_description=readme(),
    license='MIT',
    keywords='aws',

    install_requires=[
        'boto',
        'click',
    ],

    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
    ],

    entry_points={
        'console_scripts': [
            'rubberjack=rubberjackcli.click:rubberjack',
        ],
    },
)
Include README in package cf. PyPA recommendation
Include README in package cf. PyPA recommendation

http://python-packaging.readthedocs.io/en/latest/metadata.html#a-readme-long-description
Python
mit
laterpay/rubberjack-cli
--- +++ @@ -5,10 +5,11 @@
 
 HERE = os.path.dirname(__file__)
 
-try:
-    long_description = open(os.path.join(HERE, 'README.rst')).read()
-except:
-    long_description = None
+
+def readme():
+    with open('README.rst') as f:
+        return f.read()
+
 
 setup(
     name='rubberjack-cli',
@@ -21,7 +22,7 @@
     author='LaterPay GmbH',
     url='https://github.com/laterpay/rubberjack-cli',
     description='RubberJack manages (AWS) Elastic Beanstalks',
-    long_description=long_description,
+    long_description=readme(),
     license='MIT',
     keywords='aws',
 
bdee28d919458449d882df352ed6e2d675b87901
setup.py
setup.py
from setuptools import setup, find_packages

version = __import__('eemeter').get_version()

setup(
    name='eemeter',
    version=version,
    description='Open Energy Efficiency Meter',
    long_description=(
        "Standard methods for calculating energy efficiency savings."
    ),
    url='https://github.com/openeemeter/eemeter/',
    author='Open Energy Efficiency',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
    keywords='open energy efficiency meter',
    packages=find_packages(),
    install_requires=[
        'holidays',
        'lxml <= 3.6.1',
        'numpy >= 1.10.2',
        'scipy',
        'pandas >= 0.18,<0.19',
        'patsy',
        'pytz',
        'requests',
        'scikit-learn',
        'statsmodels >= 0.8.0rc1',
        'SQLAlchemy',
    ],
    package_data={'': ['*.json', '*.gz']},
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
from setuptools import setup, find_packages

version = __import__('eemeter').get_version()

setup(
    name='eemeter',
    version=version,
    description='Open Energy Efficiency Meter',
    long_description=(
        "Standard methods for calculating energy efficiency savings."
    ),
    url='https://github.com/openeemeter/eemeter/',
    author='Open Energy Efficiency',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
    keywords='open energy efficiency meter',
    packages=find_packages(),
    install_requires=[
        'holidays',
        'lxml <= 3.6.1',
        'numpy >= 1.10.2',
        'scipy',
        'pandas >= 0.19.2',
        'patsy',
        'pytz',
        'requests',
        'scikit-learn',
        'statsmodels >= 0.8.0rc1',
        'SQLAlchemy',
    ],
    package_data={'': ['*.json', '*.gz']},
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
Use higher pandas version for python 3.6.0 support
Use higher pandas version for python 3.6.0 support
Python
apache-2.0
openeemeter/eemeter,openeemeter/eemeter
--- +++ @@ -26,7 +26,7 @@
         'lxml <= 3.6.1',
         'numpy >= 1.10.2',
         'scipy',
-        'pandas >= 0.18,<0.19',
+        'pandas >= 0.19.2',
         'patsy',
         'pytz',
         'requests',
e91d22d34027429f7d0db86d2800ba9ed73056be
setup.py
setup.py
from setuptools import setup, find_packages

setup(
    name = 'django-news-sitemaps',
    version = '0.1.7',
    description = 'Generates sitemaps compatible with the Google News schema',
    author = 'TWT Web Devs',
    author_email = 'webdev@washingtontimes.com',
    url = 'http://github.com/washingtontimes/django-news-sitemaps/',
    include_package_data = True,
    packages = find_packages(),
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Operating System :: OS Independent',
    ]
)
from setuptools import setup, find_packages

setup(
    name = 'django-news-sitemaps',
    version = '1.0.0',
    description = 'Generates sitemaps compatible with the Google News schema',
    author = 'TWT Web Devs',
    author_email = 'webdev@washingtontimes.com',
    url = 'http://github.com/washingtontimes/django-news-sitemaps/',
    include_package_data = True,
    packages = find_packages(),
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Operating System :: OS Independent',
    ]
)
Increment version number to 1.0.0
Increment version number to 1.0.0
Python
apache-2.0
theatlantic/django-news-sitemaps
--- +++ @@ -2,7 +2,7 @@
 
 setup(
     name = 'django-news-sitemaps',
-    version = '0.1.7',
+    version = '1.0.0',
     description = 'Generates sitemaps compatible with the Google News schema',
     author = 'TWT Web Devs',
     author_email = 'webdev@washingtontimes.com',
96499391091a81302ac3947536b573c21f571677
setup.py
setup.py
from setuptools import setup
from distutils import sysconfig
import re
#from setuptools.dist import Distribution

site_packages_path = sysconfig.get_python_lib()
sprem = re.match(
    r'.*(lib[\\/](python\d\.\d[\\/])?site-packages)', site_packages_path, re.I)
rel_site_packages = sprem.group(1)

#class PureDistribution(Distribution):
#    def is_pure(self):
#        return True

setup(
    name = 'coverage_pth',
    version = '0.0.1',
    description = 'Coverage PTH file to enable coverage at the virtualenv level',
    #packages = '..',
    #include_pacakage_date=True,
    data_files=[
        (rel_site_packages, ['coverage_pth.pth',]),
    ],
    install_requires=[
        'coverage',
    ],
    #distclass=PureDistribution,
    zip_safe=False,
)
from setuptools import setup
from distutils import sysconfig
import re
#from setuptools.dist import Distribution

site_packages_path = sysconfig.get_python_lib()
try:
    sprem = re.match(
        r'.*(lib[\\/](python\d(\.\d)*[\\/])?site-packages)', site_packages_path, re.I)
    if sprem is None:
        sprem = re.match(
            r'.*(lib[\\/](python\d(\.\d)*[\\/])?dist-packages)', site_packages_path, re.I)
    rel_site_packages = sprem.group(1)
except Exception as exc:
    print("I'm having trouble finding your site-packages directory. Is it where you expect?")
    print("sysconfig.get_python_lib() returns '{}'".format(site_packages_path))
    print("Exception was: {}".format(exc))
    sys.exit(-1)

#class PureDistribution(Distribution):
#    def is_pure(self):
#        return True

setup(
    name = 'coverage_pth',
    version = '0.0.1',
    description = 'Coverage PTH file to enable coverage at the virtualenv level',
    #packages = '..',
    #include_pacakage_date=True,
    data_files=[
        (rel_site_packages, ['coverage_pth.pth',]),
    ],
    install_requires=[
        'coverage',
    ],
    #distclass=PureDistribution,
    zip_safe=False,
)
Expand regex for location of site-packages. Provide more info to user about possible failure to find site-packages
Expand regex for location of site-packages. Provide more info to user about possible failure to find site-packages
Python
bsd-2-clause
dougn/coverage_pth
--- +++ @@ -4,9 +4,18 @@
 #from setuptools.dist import Distribution
 
 site_packages_path = sysconfig.get_python_lib()
-sprem = re.match(
-    r'.*(lib[\\/](python\d\.\d[\\/])?site-packages)', site_packages_path, re.I)
-rel_site_packages = sprem.group(1)
+try:
+    sprem = re.match(
+        r'.*(lib[\\/](python\d(\.\d)*[\\/])?site-packages)', site_packages_path, re.I)
+    if sprem is None:
+        sprem = re.match(
+            r'.*(lib[\\/](python\d(\.\d)*[\\/])?dist-packages)', site_packages_path, re.I)
+    rel_site_packages = sprem.group(1)
+except Exception as exc:
+    print("I'm having trouble finding your site-packages directory. Is it where you expect?")
+    print("sysconfig.get_python_lib() returns '{}'".format(site_packages_path))
+    print("Exception was: {}".format(exc))
+    sys.exit(-1)
 
 #class PureDistribution(Distribution):
 #    def is_pure(self):
42043217547f05eb1652d481840d15b581b80434
setup.py
setup.py
# -*- coding: utf-8 -*-
import os

from setuptools import find_packages
from setuptools import setup


base_dir = os.path.dirname(__file__)
setup(
    name='elastalert',
    version='0.0.99',
    description='Runs custom filters on Elasticsearch and alerts on matches',
    author='Quentin Long',
    author_email='qlo@yelp.com',
    setup_requires='setuptools',
    license='Copyright 2014 Yelp',
    entry_points={
        'console_scripts': ['elastalert-create-index=elastalert.create_index:main',
                            'elastalert-test-rule=elastalert.test_rule:main',
                            'elastalert-rule-from-kibana=elastalert.rule_from_kibana:main',
                            'elastalert=elastalert.elastalert:main']},
    packages=find_packages(),
    package_data={'elastalert': ['schema.yaml']},
    install_requires=[
        'argparse',
        'elasticsearch',
        'jira==0.32',  # jira.exceptions is missing from later versions
        'jsonschema',
        'mock',
        'python-dateutil',
        'PyStaticConfiguration',
        'pyyaml',
        'simplejson',
        'boto',
        'botocore',
        'blist',
        'croniter',
        'configparser',
        'aws-requests-auth'
    ]
)
# -*- coding: utf-8 -*-
import os

from setuptools import find_packages
from setuptools import setup


base_dir = os.path.dirname(__file__)
setup(
    name='elastalert',
    version='0.1.0',
    description='Runs custom filters on Elasticsearch and alerts on matches',
    author='Quentin Long',
    author_email='qlo@yelp.com',
    setup_requires='setuptools',
    license='Copyright 2014 Yelp',
    entry_points={
        'console_scripts': ['elastalert-create-index=elastalert.create_index:main',
                            'elastalert-test-rule=elastalert.test_rule:main',
                            'elastalert-rule-from-kibana=elastalert.rule_from_kibana:main',
                            'elastalert=elastalert.elastalert:main']},
    packages=find_packages(),
    package_data={'elastalert': ['schema.yaml']},
    install_requires=[
        'argparse',
        'elasticsearch',
        'jira==0.32',  # jira.exceptions is missing from later versions
        'jsonschema',
        'mock',
        'python-dateutil',
        'PyStaticConfiguration',
        'pyyaml',
        'simplejson',
        'boto',
        'botocore',
        'blist',
        'croniter',
        'configparser',
        'aws-requests-auth'
    ]
)
Bump elastalert version to 0.1.0
Bump elastalert version to 0.1.0
Python
apache-2.0
Yelp/elastalert,jetyang2005/elastalert,jetyang2005/elastalert,jetyang2005/elastalert,dvopsway/elastalert,rprabhat/elastalert
--- +++ @@ -8,7 +8,7 @@
 base_dir = os.path.dirname(__file__)
 setup(
     name='elastalert',
-    version='0.0.99',
+    version='0.1.0',
     description='Runs custom filters on Elasticsearch and alerts on matches',
     author='Quentin Long',
     author_email='qlo@yelp.com',
a1a651acf48604ab135961b324d3b9e271a2128b
setup.py
setup.py
from distutils.core import setup

with open('README.md') as readme:
    with open('HISTORY.md') as history:
        long_description = readme.read() + '\n\n' + history.read()

VERSION = '1.0'

setup(
    name='argparse-autogen',
    py_modules=['argparse_autogen'],
    version=VERSION,
    url='https://github.com/sashgorokhov/argparse-autogen',
    download_url='https://github.com/sashgorokhov/argparse-autogen/archive/v%s.zip' % VERSION,
    keywords=['python', 'argparse', 'generate'],
    classifiers=[],
    long_description=long_description,
    license='MIT License',
    author='sashgorokhov',
    author_email='sashgorokhov@gmail.com',
    description="Parser with automatic creation of parsers and subparsers for paths.",
)
from distutils.core import setup

with open('README.md') as readme:
    with open('HISTORY.md') as history:
        long_description = readme.read() + '\n\n' + history.read()

try:
    import pypandoc

    long_description = pypandoc.convert(long_description, 'rst')
except(IOError, ImportError):
    long_description = long_description

VERSION = '1.0.1'

setup(
    name='argparse-autogen',
    py_modules=['argparse_autogen'],
    version=VERSION,
    url='https://github.com/sashgorokhov/argparse-autogen',
    download_url='https://github.com/sashgorokhov/argparse-autogen/archive/v%s.zip' % VERSION,
    keywords=['python', 'argparse', 'generate'],
    classifiers=[],
    long_description=long_description,
    license='MIT License',
    author='sashgorokhov',
    author_email='sashgorokhov@gmail.com',
    description="Parser with automatic creation of parsers and subparsers for paths.",
)
Convert md to rst readme specially for PyPi
Convert md to rst readme specially for PyPi
Python
mit
sashgorokhov/argparse-autogen
--- +++ @@ -4,7 +4,14 @@
     with open('HISTORY.md') as history:
         long_description = readme.read() + '\n\n' + history.read()
 
-VERSION = '1.0'
+try:
+    import pypandoc
+
+    long_description = pypandoc.convert(long_description, 'rst')
+except(IOError, ImportError):
+    long_description = long_description
+
+VERSION = '1.0.1'
 
 setup(
     name='argparse-autogen',
4a7a7a0b558358840440f937a03dcc88e469ca01
setup.py
setup.py
#!/usr/bin/env python
from setuptools import setup, find_packages


with open('README.rst') as readme_file:
    README = readme_file.read()


install_requires = [
    'click==6.2',
    'botocore>=1.4.8,<2.0.0',
    'virtualenv>=15.0.0,<16.0.0',
    'typing==3.5.2.2',
]


setup(
    name='chalice',
    version='0.5.0',
    description="Microframework",
    long_description=README,
    author="James Saryerwinnie",
    author_email='js@jamesls.com',
    url='https://github.com/jamesls/chalice',
    packages=find_packages(exclude=['tests']),
    install_requires=install_requires,
    license="Apache License 2.0",
    package_data={'chalice': ['*.json']},
    include_package_data=True,
    zip_safe=False,
    keywords='chalice',
    entry_points={
        'console_scripts': [
            'chalice = chalice.cli:main',
        ]
    },
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
    ],
)
#!/usr/bin/env python
from setuptools import setup, find_packages


with open('README.rst') as readme_file:
    README = readme_file.read()


install_requires = [
    'click==6.6',
    'botocore>=1.4.8,<2.0.0',
    'virtualenv>=15.0.0,<16.0.0',
    'typing==3.5.3.0',
]


setup(
    name='chalice',
    version='0.5.0',
    description="Microframework",
    long_description=README,
    author="James Saryerwinnie",
    author_email='js@jamesls.com',
    url='https://github.com/jamesls/chalice',
    packages=find_packages(exclude=['tests']),
    install_requires=install_requires,
    license="Apache License 2.0",
    package_data={'chalice': ['*.json']},
    include_package_data=True,
    zip_safe=False,
    keywords='chalice',
    entry_points={
        'console_scripts': [
            'chalice = chalice.cli:main',
        ]
    },
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
    ],
)
Upgrade type and click versions
Upgrade type and click versions
Python
apache-2.0
freaker2k7/chalice,awslabs/chalice
--- +++ @@ -7,10 +7,10 @@
 
 
 install_requires = [
-    'click==6.2',
+    'click==6.6',
     'botocore>=1.4.8,<2.0.0',
     'virtualenv>=15.0.0,<16.0.0',
-    'typing==3.5.2.2',
+    'typing==3.5.3.0',
 ]
 
 
e5e6d4ac9e86aa7e44694cf4746c4c9ec91df107
setup.py
setup.py
#!/usr/bin/env python

from distutils.core import setup

setup(name = 'skeleton',
      version = '0.0',
      description = 'A python skeleton project',
      long_description = '''
This project represents a basic python skeleton project that can be
used as the basis for other projects. Feel free to fork this project
and use as you see fit, but please update the information here.

Note: Licensing only applies to the skeleton itself, should you use
this skeleton as the basis for a new project, update the license accordingly.
''',
      author = 'Chris Salch',
      author_email = 'emeraldd.chris@gmail.com',
      url = 'https://github.com/arlaneenalra/python-skeleton',
      classifiers = [
          'License :: OSI Approved :: BSD License'
      ],
      license = 'License :: OSI Approved :: BSD License',
      packages = [],
      package_dir = { '': 'lib'},
      scripts = [],
      py_modules = [],
      )
#!/usr/bin/env python

from distutils.core import setup

setup(name = 'skeleton',
      version = '0.0',
      description = 'A python skeleton project',
      long_description = '''
This project represents a basic python skeleton project that can be
used as the basis for other projects. Feel free to fork this project
and use as you see fit, but please update the information here.

Note: Licensing only applies to the skeleton itself, should you use
this skeleton as the basis for a new project, update the license accordingly.
''',
      author = 'Chris Salch',
      author_email = 'emeraldd.chris@gmail.com',
      url = 'https://github.com/arlaneenalra/python-skeleton',
      classifiers = [
          'License :: OSI Approved :: BSD License'
      ],
      license = 'License :: OSI Approved :: BSD License',
      packages = [],
      scripts = [],
      py_modules = [],
      package_dir = { '': 'lib'},
      )
Move generic lib to bottom of config.
Move generic lib to bottom of config.
Python
bsd-2-clause
arlaneenalra/python-skeleton
--- +++ @@ -21,8 +21,8 @@
       ],
       license = 'License :: OSI Approved :: BSD License',
       packages = [],
-      package_dir = { '': 'lib'},
       scripts = [],
       py_modules = [],
+      package_dir = { '': 'lib'},
       )
0ef968528f31da5dd09f016134b4a1ffa6377f84
scripts/slave/chromium/package_source.py
scripts/slave/chromium/package_source.py
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""A tool to package a checkout's source and upload it to Google Storage."""

import sys


if '__main__' == __name__:
  sys.exit(0)
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""A tool to package a checkout's source and upload it to Google Storage."""

import os
import sys

from common import chromium_utils
from slave import slave_utils


FILENAME = 'chromium-src.tgz'
GSBASE = 'chromium-browser-csindex'


def main(argv):
  if not os.path.exists('src'):
    raise Exception('ERROR: no src directory to package, exiting')

  chromium_utils.RunCommand(['rm', '-f', FILENAME])
  if os.path.exists(FILENAME):
    raise Exception('ERROR: %s cannot be removed, exiting' % FILENAME)

  if chromium_utils.RunCommand(['tar', 'czf', FILENAME, 'src/']) != 0:
    raise Exception('ERROR: failed to create %s, exiting' % FILENAME)

  status = slave_utils.GSUtilCopyFile(FILENAME, GSBASE)
  if status != 0:
    raise Exception('ERROR: GSUtilCopyFile error %d. "%s" -> "%s"' % (
        status, FILENAME, GSBASE))

  return 0


if '__main__' == __name__:
  sys.exit(main(None))
Create source snapshot and upload to GS.
Create source snapshot and upload to GS.

BUG=79198
Review URL: http://codereview.chromium.org/7129020

git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@88372 0039d316-1c4b-4281-b951-d872f2087c98
Python
bsd-3-clause
eunchong/build,eunchong/build,eunchong/build,eunchong/build
--- +++ @@ -6,8 +6,35 @@
 
 """A tool to package a checkout's source and upload it to Google Storage."""
 
+import os
 import sys
+
+from common import chromium_utils
+from slave import slave_utils
+
+
+FILENAME = 'chromium-src.tgz'
+GSBASE = 'chromium-browser-csindex'
+
+
+def main(argv):
+  if not os.path.exists('src'):
+    raise Exception('ERROR: no src directory to package, exiting')
+
+  chromium_utils.RunCommand(['rm', '-f', FILENAME])
+  if os.path.exists(FILENAME):
+    raise Exception('ERROR: %s cannot be removed, exiting' % FILENAME)
+
+  if chromium_utils.RunCommand(['tar', 'czf', FILENAME, 'src/']) != 0:
+    raise Exception('ERROR: failed to create %s, exiting' % FILENAME)
+
+  status = slave_utils.GSUtilCopyFile(FILENAME, GSBASE)
+  if status != 0:
+    raise Exception('ERROR: GSUtilCopyFile error %d. "%s" -> "%s"' % (
+        status, FILENAME, GSBASE))
+
+  return 0
 
 
 if '__main__' == __name__:
-  sys.exit(0)
+  sys.exit(main(None))
6c8dd596a0f5f84acee54938d2f948f25445327d
src/Scripts/correlation-histogram.py
src/Scripts/correlation-histogram.py
# Take the output of the BitFunnel correlate command and generate histograms.

from collections import defaultdict
import csv

term_term_correlation = defaultdict(int)
term_all_correlation = defaultdict(int)

# TODO: don't hardcode name.
with open("/tmp/Correlate-0.csv") as f:
    reader = csv.reader(f)
    for row in reader:
        term_all = 0
        pos = 0
        for item in row:
            if pos > 0 and pos % 2 == 0:
                correlation = int(item)
                term_all += correlation
                term_term_correlation[correlation] += 1
            pos += 1
        term_all_correlation[term_all] += 1

def dict_to_csv(dd, filename):
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        for k,v in dd.items():
            writer.writerow([k,v])

dict_to_csv(term_term_correlation, "/tmp/term-term.csv")
dict_to_csv(term_all_correlation, "/tmp/term-all.csv")
# Take the output of the BitFunnel correlate command and generate histograms.

from collections import defaultdict
import csv

term_term_correlation = defaultdict(lambda:defaultdict(int))
term_all_correlation = defaultdict(lambda:defaultdict(int))

def bf_correlate_to_dicts(term_term_correlation,
                          term_all_correlation,
                          basepath,
                          treatment):
    filename = basepath + "-" + treatment + ".csv"
    with open(filename) as f:
        reader = csv.reader(f)
        for row in reader:
            term_all = 0
            pos = 0
            for item in row:
                if pos > 0 and pos % 2 == 0:
                    correlation = int(item)
                    term_all += correlation
                    term_term_correlation[treatment][correlation] += 1
                pos += 1
            term_all_correlation[treatment][term_all] += 1

def dict_to_csv(dd, filename):
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(["bucket","y","treatment"])
        for treatment,subdict in dd.items():
            for k, v in subdict.items():
                writer.writerow([k,v,treatment])

bf_correlate_to_dicts(term_term_correlation,
                      term_all_correlation,
                      "/tmp/correlate-150k",
                      "rank3-rank0")
bf_correlate_to_dicts(term_term_correlation,
                      term_all_correlation,
                      "/tmp/correlate-150k",
                      "rank0")
dict_to_csv(term_term_correlation, "/tmp/term-term.csv")
dict_to_csv(term_all_correlation, "/tmp/term-all.csv")
Put multiple treatments into the same histogram.
Put multiple treatments into the same histogram.
Python
mit
danluu/BitFunnel,BitFunnel/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel,BitFunnel/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel
--- +++ @@ -3,28 +3,42 @@
 from collections import defaultdict
 import csv
 
-term_term_correlation = defaultdict(int)
-term_all_correlation = defaultdict(int)
+term_term_correlation = defaultdict(lambda:defaultdict(int))
+term_all_correlation = defaultdict(lambda:defaultdict(int))
 
-# TODO: don't hardcode name.
-with open("/tmp/Correlate-0.csv") as f:
-    reader = csv.reader(f)
-    for row in reader:
-        term_all = 0
-        pos = 0
-        for item in row:
-            if pos > 0 and pos % 2 == 0:
-                correlation = int(item)
-                term_all += correlation
-                term_term_correlation[correlation] += 1
-            pos += 1
-        term_all_correlation[term_all] += 1
+def bf_correlate_to_dicts(term_term_correlation,
+                          term_all_correlation,
+                          basepath,
+                          treatment):
+    filename = basepath + "-" + treatment + ".csv"
+    with open(filename) as f:
+        reader = csv.reader(f)
+        for row in reader:
+            term_all = 0
+            pos = 0
+            for item in row:
+                if pos > 0 and pos % 2 == 0:
+                    correlation = int(item)
+                    term_all += correlation
+                    term_term_correlation[treatment][correlation] += 1
+                pos += 1
+            term_all_correlation[treatment][term_all] += 1
 
 def dict_to_csv(dd, filename):
     with open(filename, 'w') as f:
         writer = csv.writer(f)
-        for k,v in dd.items():
-            writer.writerow([k,v])
+        writer.writerow(["bucket","y","treatment"])
+        for treatment,subdict in dd.items():
+            for k, v in subdict.items():
+                writer.writerow([k,v,treatment])
 
+bf_correlate_to_dicts(term_term_correlation,
+                      term_all_correlation,
+                      "/tmp/correlate-150k",
+                      "rank3-rank0")
+bf_correlate_to_dicts(term_term_correlation,
+                      term_all_correlation,
+                      "/tmp/correlate-150k",
+                      "rank0")
 dict_to_csv(term_term_correlation, "/tmp/term-term.csv")
 dict_to_csv(term_all_correlation, "/tmp/term-all.csv")
55dc12dcac51109b4974c58252929066ca985569
sqjobs/contrib/django/djsqjobs/finders.py
sqjobs/contrib/django/djsqjobs/finders.py
from sqjobs.utils import get_jobs_from_module

import logging
logger = logging.getLogger('sqjobs.contrib.django.utils')


def get_apps_names():
    """
    copied from django-extensions compatibility sheam
    """
    try:
        # django >= 1.7, to support AppConfig
        from django.apps import apps
        return [app.name for app in apps.get_app_configs()]
    except ImportError:
        from django.db import models
        return [app.__name__[:-6] for app in models.get_apps()]


def register_all_jobs(worker):
    """
    Register all the jobs in a worker
    """
    jobs = get_all_jobs()

    for job in jobs:
        worker.register_job(job)

    return jobs


def get_all_jobs():
    """
    Get all the jobs of the django INSTALLED_APPS
    """
    jobs = []

    for app_name in get_apps_names():
        try:
            module = app_name + '.jobs'
            jobs.extend(get_jobs_from_module(module))
        except ImportError:
            pass

    return jobs
from sqjobs.utils import get_jobs_from_module

import logging
logger = logging.getLogger('sqjobs.contrib.django.utils')


def get_apps_names():
    """
    copied from django-extensions compatibility sheam
    """
    try:
        # django >= 1.7, to support AppConfig
        from django.apps import apps
        return [app.name for app in apps.get_app_configs()]
    except ImportError:
        from django.db import models
        return [app.__name__[:-7] for app in models.get_apps()]


def register_all_jobs(worker):
    """
    Register all the jobs in a worker
    """
    jobs = get_all_jobs()

    for job in jobs:
        worker.register_job(job)

    return jobs


def get_all_jobs():
    """
    Get all the jobs of the django INSTALLED_APPS
    """
    jobs = []

    for app_name in get_apps_names():
        try:
            module = app_name + '.jobs'
            jobs.extend(get_jobs_from_module(module))
        except ImportError:
            pass

    return jobs
Remove 7 characters to get rid of ".models"
Remove 7 characters to get rid of ".models"
Python
bsd-3-clause
gnufede/sqjobs,gnufede/sqjobs
--- +++ @@ -14,7 +14,7 @@
         return [app.name for app in apps.get_app_configs()]
     except ImportError:
         from django.db import models
-        return [app.__name__[:-6] for app in models.get_apps()]
+        return [app.__name__[:-7] for app in models.get_apps()]
 
 
 def register_all_jobs(worker):
24f69b587ce38c581f9ee68e22978963b266f010
html_to_telegraph.py
html_to_telegraph.py
# encoding=utf8
from lxml import html


def _recursive_convert(element):
    # All strings outside tags should be ignored
    if not isinstance(element, html.HtmlElement):
        return

    fragment_root_element = {
        '_': element.tag
    }

    content = []
    if element.text:
        content.append({'t': element.text})

    if element.attrib:
        fragment_root_element.update({
            'a': dict(element.attrib)
        })

    for child in element:
        content.append(_recursive_convert(child))
        # Append Text node after element, if exists
        if child.tail:
            content.append({'t': child.tail})

    if len(content):
        fragment_root_element.update({
            'c': content
        })

    return fragment_root_element


def convert_html_to_telegraph_format(html_string):
    return [
        _recursive_convert(fragment) for fragment in html.fragments_fromstring(html_string)
    ]
# encoding=utf8
from lxml import html
import json


def _recursive_convert(element):
    # All strings outside tags should be ignored

    fragment_root_element = {
        '_': element.tag
    }

    content = []
    if element.text:
        content.append({'t': element.text})

    if element.attrib:
        fragment_root_element.update({
            'a': dict(element.attrib)
        })

    for child in element:
        content.append(_recursive_convert(child))
        # Append Text node after element, if exists
        if child.tail:
            content.append({'t': child.tail})

    if len(content):
        fragment_root_element.update({
            'c': content
        })

    return fragment_root_element


def convert_html_to_telegraph_format(html_string):
    content = []
    for fragment in html.fragments_fromstring(html_string):
        if not isinstance(fragment, html.HtmlElement):
            continue

        content.append(_recursive_convert(fragment))
    return json.dumps(content, ensure_ascii=False)
Return string instead of list
Return string instead of list
Python
mit
mercuree/html-telegraph-poster
--- +++ @@ -1,11 +1,10 @@
 # encoding=utf8
 from lxml import html
+import json
 
 
 def _recursive_convert(element):
     # All strings outside tags should be ignored
-    if not isinstance(element, html.HtmlElement):
-        return
 
     fragment_root_element = {
         '_': element.tag
@@ -35,6 +34,10 @@
 
 
 def convert_html_to_telegraph_format(html_string):
-    return [
-        _recursive_convert(fragment) for fragment in html.fragments_fromstring(html_string)
-    ]
+    content = []
+    for fragment in html.fragments_fromstring(html_string):
+        if not isinstance(fragment, html.HtmlElement):
+            continue
+
+        content.append(_recursive_convert(fragment))
+    return json.dumps(content, ensure_ascii=False)
962fc49f734b04e717bf936745013ab0c19c4ee1
utils.py
utils.py
import cv2
import itertools
import numpy as np

def partition(pred, iterable):
  """
  Partition the iterable into two disjoint entries based
  on the predicate.

  @return: Tuple (iterable1, iterable2)
  """
  iter1, iter2 = itertools.tee(iterable)
  return itertools.filterfalse(pred, iter1), filter(pred, iter2)

def decay(val, min_val, decay_rate):
  return max(val * decay_rate, min_val)

def one_hot(i, n):
  """
  One-hot encoder. Returns a numpy array of length n with i-th
  entry set to 1, and all others set to 0."

  @return: numpy.array
  """
  assert i < n, "Invalid args to one_hot"
  enc = np.zeros(n)
  enc[i] = 1
  return enc

def resize_image(image, width, height):
  """
  Resize the image screen to the configured width and height and
  convert it to grayscale.
  """
  grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
  return cv2.resize(grayscale, (width, height))
from six.moves import filterfalse

import cv2
import itertools
import numpy as np

def partition(pred, iterable):
  """
  Partition the iterable into two disjoint entries based
  on the predicate.

  @return: Tuple (iterable1, iterable2)
  """
  iter1, iter2 = itertools.tee(iterable)
  return filterfalse(pred, iter1), filter(pred, iter2)

def decay(val, min_val, decay_rate):
  return max(val * decay_rate, min_val)

def one_hot(i, n):
  """
  One-hot encoder. Returns a numpy array of length n with i-th
  entry set to 1, and all others set to 0."

  @return: numpy.array
  """
  assert i < n, "Invalid args to one_hot"
  enc = np.zeros(n)
  enc[i] = 1
  return enc

def resize_image(image, width, height):
  """
  Resize the image screen to the configured width and height and
  convert it to grayscale.
  """
  grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
  return cv2.resize(grayscale, (width, height))
Fix python 2.7 compatibility issue
Fix python 2.7 compatibility issue
Python
mit
viswanathgs/dist-dqn,viswanathgs/dist-dqn
--- +++ @@ -1,3 +1,5 @@
+from six.moves import filterfalse
+
 import cv2
 import itertools
 import numpy as np
@@ -10,7 +12,7 @@
   @return: Tuple (iterable1, iterable2)
   """
   iter1, iter2 = itertools.tee(iterable)
-  return itertools.filterfalse(pred, iter1), filter(pred, iter2)
+  return filterfalse(pred, iter1), filter(pred, iter2)
 
 def decay(val, min_val, decay_rate):
   return max(val * decay_rate, min_val)
1016664a5d0285a51455f90d47940b39f77562e4
src/pretix/base/views/cachedfiles.py
src/pretix/base/views/cachedfiles.py
import os

from django.http import FileResponse, HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from django.views.generic import TemplateView

from pretix.base.models import CachedFile


class DownloadView(TemplateView):
    template_name = "pretixbase/cachedfiles/pending.html"

    @cached_property
    def object(self) -> CachedFile:
        return get_object_or_404(CachedFile, id=self.kwargs['id'])

    def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
        if 'ajax' in request.GET:
            return HttpResponse('1' if self.object.file else '0')
        elif self.object.file:
            resp = FileResponse(self.object.file.file, content_type=self.object.type)
            _, ext = os.path.splitext(self.object.filename)
            resp['Content-Disposition'] = 'attachment; filename="{}.{}"'.format(self.object.id, ext)
            return resp
        else:
            return super().get(request, *args, **kwargs)
import os

from django.http import FileResponse, HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from django.views.generic import TemplateView

from pretix.base.models import CachedFile


class DownloadView(TemplateView):
    template_name = "pretixbase/cachedfiles/pending.html"

    @cached_property
    def object(self) -> CachedFile:
        return get_object_or_404(CachedFile, id=self.kwargs['id'])

    def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
        if 'ajax' in request.GET:
            return HttpResponse('1' if self.object.file else '0')
        elif self.object.file:
            resp = FileResponse(self.object.file.file, content_type=self.object.type)
            _, ext = os.path.splitext(self.object.filename)
            resp['Content-Disposition'] = 'attachment; filename="{}{}"'.format(self.object.id, ext)
            return resp
        else:
            return super().get(request, *args, **kwargs)
Remove duplicate dot in file downloads
Remove duplicate dot in file downloads
Python
apache-2.0
Flamacue/pretix,Flamacue/pretix,Flamacue/pretix,Flamacue/pretix
--- +++ @@ -21,7 +21,7 @@
         elif self.object.file:
             resp = FileResponse(self.object.file.file, content_type=self.object.type)
             _, ext = os.path.splitext(self.object.filename)
-            resp['Content-Disposition'] = 'attachment; filename="{}.{}"'.format(self.object.id, ext)
+            resp['Content-Disposition'] = 'attachment; filename="{}{}"'.format(self.object.id, ext)
             return resp
         else:
             return super().get(request, *args, **kwargs)
917ba14418f01fa2fc866fc1c18989cc500c2cfd
bin/license_finder_pip.py
bin/license_finder_pip.py
#!/usr/bin/env python

import json
import sys

from pip._internal.req import parse_requirements
from pip._internal.download import PipSession
from pip._vendor import pkg_resources
from pip._vendor.six import print_

requirements = [pkg_resources.Requirement.parse(str(req.req)) for req in parse_requirements(sys.argv[1], session=PipSession()) if req.req != None]

transform = lambda dist: {
    'name': dist.project_name,
    'version': dist.version,
    'location': dist.location,
    'dependencies': list(map(lambda dependency: dependency.project_name, dist.requires())),
}

packages = [transform(dist) for dist in pkg_resources.working_set.resolve(requirements)]

print_(json.dumps(packages))
#!/usr/bin/env python

import json
import sys

try:
    from pip._internal.req import parse_requirements
except ImportError:
    from pip.req import parse_requirements
try:
    from pip._internal.download import PipSession
except ImportError:
    from pip.download import PipSession

from pip._vendor import pkg_resources
from pip._vendor.six import print_

requirements = [pkg_resources.Requirement.parse(str(req.req)) for req in parse_requirements(sys.argv[1], session=PipSession()) if req.req != None]

transform = lambda dist: {
    'name': dist.project_name,
    'version': dist.version,
    'location': dist.location,
    'dependencies': list(map(lambda dependency: dependency.project_name, dist.requires())),
}

packages = [transform(dist) for dist in pkg_resources.working_set.resolve(requirements)]

print_(json.dumps(packages))
Add backwards compatibility with pip v9
Add backwards compatibility with pip v9
Python
mit
pivotal/LicenseFinder,pivotal/LicenseFinder,pivotal/LicenseFinder,pivotal/LicenseFinder,pivotal/LicenseFinder,pivotal/LicenseFinder,pivotal/LicenseFinder
--- +++ @@ -2,8 +2,16 @@
 
 import json
 import sys
-from pip._internal.req import parse_requirements
-from pip._internal.download import PipSession
+
+try:
+    from pip._internal.req import parse_requirements
+except ImportError:
+    from pip.req import parse_requirements
+try:
+    from pip._internal.download import PipSession
+except ImportError:
+    from pip.download import PipSession
+
 from pip._vendor import pkg_resources
 from pip._vendor.six import print_
 
3ba549b00e4ee8491e9adab7baa36e27edd45fa9
{{cookiecutter.extension_name}}/setup.py
{{cookiecutter.extension_name}}/setup.py
from setuptools import setup
from setupbase import create_cmdclass, install_npm

cmdclass = create_cmdclass(['js'])
cmdclass['js'] = install_npm()

setup_args = dict(
    name = '{{cookiecutter.extension_name}}',
    version = '0.18.0',
    packages = ['{{cookiecutter.extension_name}}'],
    author = '{{cookiecutter.author_name}}',
    author_email = '{{cookiecutter.author_email}}',
    url = 'http://jupyter.org',
    license = 'BSD',
    platforms = "Linux, Mac OS X, Windows",
    keywords = ['ipython', 'jupyter'],
    classifiers = [
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    cmdclass = cmdclass,
    install_requires = [
        'jupyterlab>=0.18.0',
        'notebook>=4.3.0',
        'ipython>=1.0.0'
    ]
)

if __name__ == '__main__':
    setup(**setup_args)
from setuptools import setup
from setupbase import create_cmdclass, install_npm

cmdclass = create_cmdclass(['labextension', 'nbextension'])
cmdclass['labextension'] = install_npm('labextension')
cmdclass['nbextension'] = install_npm('nbextension')

setup_args = dict(
    name = '{{cookiecutter.extension_name}}',
    version = '0.18.0',
    packages = ['{{cookiecutter.extension_name}}'],
    author = '{{cookiecutter.author_name}}',
    author_email = '{{cookiecutter.author_email}}',
    url = 'http://jupyter.org',
    license = 'BSD',
    platforms = "Linux, Mac OS X, Windows",
    keywords = ['ipython', 'jupyter'],
    classifiers = [
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    cmdclass = cmdclass,
    install_requires = [
        'jupyterlab>=0.18.0',
        'notebook>=4.3.0',
        'ipython>=1.0.0'
    ]
)

if __name__ == '__main__':
    setup(**setup_args)
Create commands for labextension and nbextension
Create commands for labextension and nbextension
Python
cc0-1.0
jupyterlab/mimerender-cookiecutter,gnestor/mimerender-cookiecutter,jupyterlab/mimerender-cookiecutter,gnestor/mimerender-cookiecutter
--- +++ @@ -1,8 +1,9 @@
 from setuptools import setup
 from setupbase import create_cmdclass, install_npm
 
-cmdclass = create_cmdclass(['js'])
-cmdclass['js'] = install_npm()
+cmdclass = create_cmdclass(['labextension', 'nbextension'])
+cmdclass['labextension'] = install_npm('labextension')
+cmdclass['nbextension'] = install_npm('nbextension')
 
 setup_args = dict(
     name = '{{cookiecutter.extension_name}}',
9d5300f6688038ca59b07ed4033c8768658377c6
examples/prompts/multi-column-autocompletion-with-meta.py
examples/prompts/multi-column-autocompletion-with-meta.py
#!/usr/bin/env python
"""
Autocompletion example that shows meta-information alongside the completions.
"""
from __future__ import unicode_literals

from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit import prompt


animal_completer = WordCompleter([
    'alligator', 'ant', 'ape',
    'bat', 'bear', 'beaver', 'bee', 'bison',
    'butterfly', 'cat', 'chicken',
    'crocodile', 'dinosaur', 'dog', 'dolphine', 'dove', 'duck', 'eagle',
    'elephant',
], meta_dict={
    'alligator': 'An alligator is a crocodilian in the genus Alligator of the family Alligatoridae.',
    'ant': 'Ants are eusocial insects of the family Formicidae',
    'ape': 'Apes (Hominoidea) are a branch of Old World tailless anthropoid catarrhine primates ',
    'bat': 'Bats are mammals of the order Chiroptera',
}, ignore_case=True)


def main():
    text = prompt('Give some animals: ', completer=animal_completer,
                  display_completions_in_columns=True)
    print('You said: %s' % text)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
"""
Autocompletion example that shows meta-information alongside the completions.
"""
from __future__ import unicode_literals

from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit import prompt


animal_completer = WordCompleter([
    'alligator', 'ant', 'ape',
    'bat', 'bear', 'beaver', 'bee', 'bison',
    'butterfly', 'cat', 'chicken',
    'crocodile', 'dinosaur', 'dog', 'dolphin', 'dove', 'duck', 'eagle',
    'elephant',
], meta_dict={
    'alligator': 'An alligator is a crocodilian in the genus Alligator of the family Alligatoridae.',
    'ant': 'Ants are eusocial insects of the family Formicidae',
    'ape': 'Apes (Hominoidea) are a branch of Old World tailless anthropoid catarrhine primates ',
    'bat': 'Bats are mammals of the order Chiroptera',
}, ignore_case=True)


def main():
    text = prompt('Give some animals: ', completer=animal_completer,
                  display_completions_in_columns=True)
    print('You said: %s' % text)


if __name__ == '__main__':
    main()
Fix typo: `dolphine` -> `dolphin`
Fix typo: `dolphine` -> `dolphin`
Python
bsd-3-clause
jonathanslenders/python-prompt-toolkit
--- +++ @@ -15,7 +15,7 @@ 'bat', 'bear', 'beaver', 'bee', 'bison', 'butterfly', 'cat', 'chicken', - 'crocodile', 'dinosaur', 'dog', 'dolphine', 'dove', 'duck', 'eagle', + 'crocodile', 'dinosaur', 'dog', 'dolphin', 'dove', 'duck', 'eagle', 'elephant', ], meta_dict={ 'alligator': 'An alligator is a crocodilian in the genus Alligator of the family Alligatoridae.',
973c6a541deadb5a4b7c23dae191acf9d4c1be27
buffer/tests/test_link.py
buffer/tests/test_link.py
from nose.tools import eq_, raises from mock import MagicMock, patch from buffer.models.link import Link def test_links_shares(): ''' Test link's shares retrieving from constructor ''' mocked_api = MagicMock() mocked_api.get.return_value = {'shares': 123} link = Link(api=mocked_api, url='www.google.com') eq_(link, {'shares': 123, 'url': 'www.google.com', 'api': mocked_api})
from nose.tools import eq_, raises from mock import MagicMock, patch from buffer.models.link import Link def test_links_shares(): ''' Test link's shares retrieving from constructor ''' mocked_api = MagicMock() mocked_api.get.return_value = {'shares': 123} link = Link(api=mocked_api, url='www.google.com') eq_(link, {'shares': 123, 'url': 'www.google.com', 'api': mocked_api}) mocked_api.get.assert_called_once_with(url='links/shares.json?url=www.google.com') def test_links_get_shares(): ''' Test link's shares retrieving method ''' mocked_api = MagicMock() mocked_api.get.return_value = {'shares': 123} link = Link(api=mocked_api, url='www.google.com') eq_(link, {'shares': 123, 'url': 'www.google.com', 'api': mocked_api}) eq_(link.get_shares(), 123) mocked_api.get.assert_any_call(url='links/shares.json?url=www.google.com') eq_(mocked_api.get.call_count, 2)
Test link's shares retrieving using get_share method
Test link's shares retrieving using get_share method
Python
mit
bufferapp/buffer-python,vtemian/buffpy
--- +++ @@ -14,3 +14,19 @@ link = Link(api=mocked_api, url='www.google.com') eq_(link, {'shares': 123, 'url': 'www.google.com', 'api': mocked_api}) + mocked_api.get.assert_called_once_with(url='links/shares.json?url=www.google.com') + +def test_links_get_shares(): + ''' + Test link's shares retrieving method + ''' + + mocked_api = MagicMock() + mocked_api.get.return_value = {'shares': 123} + + link = Link(api=mocked_api, url='www.google.com') + + eq_(link, {'shares': 123, 'url': 'www.google.com', 'api': mocked_api}) + eq_(link.get_shares(), 123) + mocked_api.get.assert_any_call(url='links/shares.json?url=www.google.com') + eq_(mocked_api.get.call_count, 2)
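Editor's note: a minimal standalone sketch of the MagicMock call-assertion pattern these tests rely on, using the standalone mock package as the record does (unittest.mock in Python 3); the URL here just mirrors the test data and is not an endorsement of the buffer library's real API:

from mock import MagicMock

api = MagicMock()
api.get.return_value = {'shares': 123}

api.get(url='links/shares.json?url=www.google.com')
# passes only while there has been exactly one call with these arguments
api.get.assert_called_once_with(url='links/shares.json?url=www.google.com')

api.get(url='links/shares.json?url=www.google.com')
# after a second call, switch to assert_any_call plus an explicit count
api.get.assert_any_call(url='links/shares.json?url=www.google.com')
assert api.get.call_count == 2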
fbf42c288a6faa13ac918047eac09985cbd6f6e0
cal/v1/network/drivers/openstack_network.py
cal/v1/network/drivers/openstack_network.py
""" OpenstackDriver for Network based on NetworkDriver """ from neutronclient.v2_0 import client from network_driver import NetworkDriver class OpenstackNetWorkDriver(NetworkDriver): """docstring for OpenstackNetWorkDriver""" def __init__(self, auth_url, project_name, username, password, user_domain_name=None, project_domain_name=None, driver_name=None): super(OpenstackNetWorkDriver, self).__init__() self.provider = "OPENSTACK" self.auth_url = auth_url self.project_domain_name = project_domain_name self.user_domain_name = user_domain_name self.project_name = project_name self.username = username self.password = password if driver_name: self.driver_name = driver_name else: self.driver_name = "default" self._setup() def _setup(self): self.client = client.Client( username=self.username, password=self.password, tenant_name=self.project_name, auth_url=self.auth_url ) def create(self): raise NotImplementedError def show(self): raise NotImplementedError def list(self): raise NotImplementedError def update(self): raise NotImplementedError def delete(self): raise NotImplementedError
""" OpenstackDriver for Network based on NetworkDriver """ from neutronclient.v2_0 import client from network_driver import NetworkDriver class OpenstackNetWorkDriver(NetworkDriver): """docstring for OpenstackNetWorkDriver""" def __init__(self, auth_url, project_name, username, password, **kargs): super(OpenstackNetWorkDriver, self).__init__() self.provider = "OPENSTACK" self.auth_url = auth_url self.project_name = project_name self.username = username self.password = password self.driver_name = kargs.pop('driver_name', 'default') self._setup() def _setup(self): self.client = client.Client( username=self.username, password=self.password, project_name=self.project_name, auth_url=self.auth_url ) def create(self, network): return self.client.create_network({'network': network}) def show(self, network_id): return self.client.show_network(network_id) def list(self, retrieve_all=True, **kargs): return self.client.list_networks(retrieve_all, **kargs) def update(self, network_id, network): return self.client.update_network(network_id, {'network': network}) def delete(self, network_id): return self.client.delete_network(network_id)
Add neutron client without tests
Add neutron client without tests
Python
apache-2.0
cloudcomputinghust/CAL
--- +++ @@ -7,45 +7,39 @@ class OpenstackNetWorkDriver(NetworkDriver): + """docstring for OpenstackNetWorkDriver""" def __init__(self, auth_url, project_name, - username, password, user_domain_name=None, - project_domain_name=None, driver_name=None): + username, password, **kargs): super(OpenstackNetWorkDriver, self).__init__() self.provider = "OPENSTACK" self.auth_url = auth_url - self.project_domain_name = project_domain_name - self.user_domain_name = user_domain_name self.project_name = project_name self.username = username self.password = password - if driver_name: - self.driver_name = driver_name - else: - self.driver_name = "default" - + self.driver_name = kargs.pop('driver_name', 'default') self._setup() def _setup(self): self.client = client.Client( username=self.username, password=self.password, - tenant_name=self.project_name, + project_name=self.project_name, auth_url=self.auth_url ) - def create(self): - raise NotImplementedError + def create(self, network): + return self.client.create_network({'network': network}) - def show(self): - raise NotImplementedError + def show(self, network_id): + return self.client.show_network(network_id) - def list(self): - raise NotImplementedError + def list(self, retrieve_all=True, **kargs): + return self.client.list_networks(retrieve_all, **kargs) - def update(self): - raise NotImplementedError + def update(self, network_id, network): + return self.client.update_network(network_id, {'network': network}) - def delete(self): - raise NotImplementedError + def delete(self, network_id): + return self.client.delete_network(network_id)
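Editor's note: a hedged usage sketch for the driver above. The endpoint, credentials, and the {'network': {...}} response shape are assumptions (neutronclient conventionally wraps resources that way); this presumes a reachable Neutron service:

# hypothetical Keystone endpoint and credentials -- replace with real values
driver = OpenstackNetWorkDriver(
    auth_url='http://controller:5000/v2.0',
    project_name='demo',
    username='demo',
    password='secret',
)
net = driver.create({'name': 'net1', 'admin_state_up': True})
print(driver.list(retrieve_all=True))
# assumes the create call returned {'network': {'id': ..., ...}}
driver.delete(net['network']['id'])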
10ae884bd68feca56b3893b84221b867f3b0aec3
orangecontrib/text/vectorization/base.py
orangecontrib/text/vectorization/base.py
import numpy as np from gensim import matutils from gensim.corpora import Dictionary class BaseVectorizer: """Base class for vectorization objects. """ name = NotImplemented def transform(self, corpus, copy=True): """Transforms a corpus to a new one with additional attributes. """ if copy: corpus = corpus.copy() if not len(corpus.dictionary): return corpus else: return self._transform(corpus) def _transform(self, corpus): raise NotImplementedError def report(self): """Reports configuration items.""" raise NotImplementedError @staticmethod def add_features(corpus, X, dictionary): order = np.argsort([dictionary[i] for i in range(len(dictionary))]) corpus.extend_attributes(X[:, order], feature_names=(dictionary[i] for i in order), var_attrs={'hidden': True}) corpus.ngrams_corpus = matutils.Sparse2Corpus(X.T)
import numpy as np from gensim import matutils from gensim.corpora import Dictionary class BaseVectorizer: """Base class for vectorization objects. """ name = NotImplemented def transform(self, corpus, copy=True): """Transforms a corpus to a new one with additional attributes. """ if copy: corpus = corpus.copy() if not len(corpus.dictionary): return corpus else: return self._transform(corpus) def _transform(self, corpus): raise NotImplementedError def report(self): """Reports configuration items.""" raise NotImplementedError @staticmethod def add_features(corpus, X, dictionary): order = np.argsort([dictionary[i] for i in range(len(dictionary))]) corpus.extend_attributes(X[:, order], feature_names=(dictionary[i] for i in order), var_attrs={'hidden': True, 'skip-normalization': True}) corpus.ngrams_corpus = matutils.Sparse2Corpus(X.T)
Mark features to skip normalization
BoW: Mark features to skip normalization. This fixes SVM on sparse data.
Python
bsd-2-clause
cheral/orange3-text,cheral/orange3-text,cheral/orange3-text
--- +++ @@ -29,5 +29,5 @@ order = np.argsort([dictionary[i] for i in range(len(dictionary))]) corpus.extend_attributes(X[:, order], feature_names=(dictionary[i] for i in order), - var_attrs={'hidden': True}) + var_attrs={'hidden': True, 'skip-normalization': True}) corpus.ngrams_corpus = matutils.Sparse2Corpus(X.T)
2d6babf3bf6107b8a5c42fe76a4d17d7fa0b51f6
catkin/src/statistics/scripts/listener.py
catkin/src/statistics/scripts/listener.py
#!/usr/bin/env python import rospy import socket from statistics.msg import StatsD class StatsHandler(): def __init__(self, statsd_host, statsd_port): self.statsd_target = (statsd_host, statsd_port) self.sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM ) def _send_msg(self, statsd_msg): self.sock.sendto(statsd_msg, self.statsd_target) def handle(self, msg): statsd_msg = 'display.{name}:{value}|{type}|@{rate}'.format( name=msg.name, value=msg.value, type=msg.type, rate=msg.rate ) print statsd_msg self._send_msg(statsd_msg) def listen(): rospy.init_node('statistics') statsd_host = rospy.get_param( '~statsd_host', 'lg-head' ) statsd_port = rospy.get_param( '~statsd_port', 8125 ) handler = StatsHandler(statsd_host, statsd_port) rospy.Subscriber('statistics/render', StatsD, handler.handle) rospy.spin() if __name__=='__main__': listen()
#!/usr/bin/env python import rospy import socket from statistics.msg import StatsD class StatsDHandler(): def __init__(self, statsd_host, statsd_port): self.statsd_target = (statsd_host, statsd_port) self.sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM ) def _send_msg(self, statsd_msg): self.sock.sendto(statsd_msg, self.statsd_target) def handle(self, msg): statsd_msg = 'display.{name}:{value}|{type}|@{rate}'.format( name=msg.name, value=msg.value, type=msg.type, rate=msg.rate ) print statsd_msg self._send_msg(statsd_msg) def listen(): rospy.init_node('statistics') statsd_host = rospy.get_param( '~statsd_host', 'lg-head' ) statsd_port = rospy.get_param( '~statsd_port', 8125 ) handler = StatsDHandler(statsd_host, statsd_port) rospy.Subscriber('statistics/render', StatsD, handler.handle) rospy.spin() if __name__=='__main__': listen()
Rename statistics handler to be more specific
Rename statistics handler to be more specific
Python
apache-2.0
EndPointCorp/appctl,EndPointCorp/appctl
--- +++ @@ -4,7 +4,7 @@ import socket from statistics.msg import StatsD -class StatsHandler(): +class StatsDHandler(): def __init__(self, statsd_host, statsd_port): self.statsd_target = (statsd_host, statsd_port) self.sock = socket.socket( @@ -37,7 +37,7 @@ 8125 ) - handler = StatsHandler(statsd_host, statsd_port) + handler = StatsDHandler(statsd_host, statsd_port) rospy.Subscriber('statistics/render', StatsD, handler.handle)
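Editor's note: the handler emits plain StatsD datagrams over UDP; a minimal standalone sketch of the wire format, with the metric name and value as illustrative placeholders (host and port taken from the node's defaults):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# e.g. a gauge named display.fps with value 60, sampled at full rate
sock.sendto(b'display.fps:60|g|@1.0', ('lg-head', 8125))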
01059774f04d26b69a1ddc058416f627da396a58
src/tempel/models.py
src/tempel/models.py
from django.db import models from django.conf import settings from tempel import utils class Entry(models.Model): content = models.TextField() language = models.CharField(max_length=20, choices=utils.get_languages()) created = models.DateTimeField(auto_now_add=True) active = models.BooleanField(default=True) class Meta: ordering = ['-created'] verbose_name_plural = "entries" def get_language(self): return utils.get_language(self.language) def get_mimetype(self): return utils.get_mimetype(self.language) def get_filename(self): return '%s.%s' % (self.id, self.get_extension()) def get_extension(self): return utils.get_extension(self.language) def __unicode__(self): return '<Entry: id=%s lang=%s>' % (self.id, self.language) class EditToken(models.Model): entry = models.ForeignKey(Entry, unique=True) token = models.CharField(max_length=8) expires = models.DateTimeField() def __unicode__(self): return '<EditToken: entry=%s token=%s>' % (self.entry_id, self.token)
from datetime import datetime from django.db import models from django.conf import settings from tempel import utils class Entry(models.Model): content = models.TextField() language = models.CharField(max_length=20, choices=utils.get_languages()) created = models.DateTimeField(default=datetime.now) active = models.BooleanField(default=True) class Meta: ordering = ['-created'] verbose_name_plural = "entries" def get_language(self): return utils.get_language(self.language) def get_mimetype(self): return utils.get_mimetype(self.language) def get_filename(self): return '%s.%s' % (self.id, self.get_extension()) def get_extension(self): return utils.get_extension(self.language) def __unicode__(self): return '<Entry: id=%s lang=%s>' % (self.id, self.language) class EditToken(models.Model): entry = models.ForeignKey(Entry, unique=True) token = models.CharField(max_length=8) expires = models.DateTimeField() def __unicode__(self): return '<EditToken: entry=%s token=%s>' % (self.entry_id, self.token)
Replace Entry's created's auto_now_add=True with default=datetime.now
Replace Entry's created's auto_now_add=True with default=datetime.now
Python
agpl-3.0
fajran/tempel
--- +++ @@ -1,3 +1,5 @@ +from datetime import datetime + from django.db import models from django.conf import settings @@ -7,7 +9,7 @@ content = models.TextField() language = models.CharField(max_length=20, choices=utils.get_languages()) - created = models.DateTimeField(auto_now_add=True) + created = models.DateTimeField(default=datetime.now) active = models.BooleanField(default=True) class Meta:
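Editor's note on the design choice: auto_now_add always stamps the save time and ignores any value passed in, while default=datetime.now is only a fallback that an explicit value can override -- useful for data imports and tests. A sketch assuming a configured Django environment; the language value is illustrative:

from datetime import datetime
from tempel.models import Entry

# with default=datetime.now, an explicit timestamp wins
entry = Entry(content='print(1)', language='python',
              created=datetime(2010, 1, 1))
entry.save()  # keeps 2010-01-01, which auto_now_add would have overwritten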
9a061d7b13482046509c6c231b0018a600cb9155
tests/__init__.py
tests/__init__.py
import os NOSQL = any([name in (os.getenv("DB") or "") for name in ("datastore", "mongodb", "imap")]) if NOSQL: from test_dal_nosql import * else: from sql import *
import os NOSQL = any([name in (os.getenv("DB") or "") for name in ("datastore", "mongodb", "imap")]) if NOSQL: from nosql import * else: from sql import *
Fix wrong import in tests
Fix wrong import in tests
Python
bsd-3-clause
niphlod/pydal,willimoa/pydal,kmcheung12/pydal,manuelep/pydal,stephenrauch/pydal,michele-comitini/pydal,web2py/pydal
--- +++ @@ -4,6 +4,6 @@ for name in ("datastore", "mongodb", "imap")]) if NOSQL: - from test_dal_nosql import * + from nosql import * else: from sql import *
5ebe17f5dc88fef7718d2c3665b905cc8d7fab7c
alembic/versions/147d1de2e5e4_add_periodicity_of_script.py
alembic/versions/147d1de2e5e4_add_periodicity_of_script.py
"""Add periodicity of script Revision ID: 147d1de2e5e4 Revises: Create Date: 2019-04-19 18:48:33.526449 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '147d1de2e5e4' down_revision = None branch_labels = None depends_on = None def upgrade(): op.add_column('script', sa.Column('periodicity', sa.String(),server_default='daily')) def downgrade(): op.drop_column('script', 'periodicity')
"""Add periodicity of script Revision ID: 147d1de2e5e4 Revises: Create Date: 2019-04-19 18:48:33.526449 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '147d1de2e5e4' down_revision = None branch_labels = None depends_on = None def upgrade(): op.add_column('script', sa.Column('periodicity', sa.String(),server_default='daily')) def downgrade(): # Sqlite does not handle the drop column directive, but alembic can take care of # creating a migration script if we use its batch mode with op.batch_alter_table("script") as batch_op: batch_op.drop_column('periodicity')
Handle downgrade on a sqlite db
Handle downgrade on a sqlite db
Python
mit
e-henry/scripto,e-henry/scripto,e-henry/scripto,e-henry/scripto
--- +++ @@ -21,4 +21,7 @@ def downgrade(): - op.drop_column('script', 'periodicity') + # Sqlite does not handle the drop column directive, but alembic can take care of + # creating a migration script if we use its batch mode + with op.batch_alter_table("script") as batch_op: + batch_op.drop_column('periodicity')
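Editor's note: batch mode covers the other ALTER directives SQLite lacks as well -- Alembic copies the table, applies every queued directive, and swaps the copy in. A hedged sketch (the column names here are illustrative, not from this project):

from alembic import op
import sqlalchemy as sa

with op.batch_alter_table('script') as batch_op:
    batch_op.alter_column('periodicity', server_default='weekly')
    batch_op.add_column(sa.Column('enabled', sa.Boolean(),
                                  server_default=sa.true()))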
83c64e096ff8b79e2ae22edbcabb483dcf27302f
tests/conftest.py
tests/conftest.py
import pytest @pytest.fixture(autouse=True) def platform(request): marker = request.node.get_marker('platform') if marker: expected = marker.args[0] minion = request.getfuncargvalue('minion') platform = minion['container'].get_os_release()['ID'] action = marker.kwargs.get('action', 'skip') if platform != expected and action == 'skip': pytest.skip('skipped on this platform: {}'.format(platform)) elif platform == expected and action == 'xfail': request.node.add_marker(pytest.mark.xfail())
import pytest @pytest.fixture(autouse=True) def platform(request): marker = request.node.get_marker('platform') if marker: expected = marker.args[0] minion = request.getfuncargvalue('minion') os_release = minion['container'].get_os_release() platform = os_release.get('ID', 'sles') action = marker.kwargs.get('action', 'skip') if platform != expected and action == 'skip': pytest.skip('skipped on this platform: {}'.format(platform)) elif platform == expected and action == 'xfail': request.node.add_marker(pytest.mark.xfail())
Fix platform detection for sles <12
Fix platform detection for sles <12
Python
mit
dincamihai/salt-toaster,dincamihai/salt-toaster
--- +++ @@ -7,7 +7,8 @@ if marker: expected = marker.args[0] minion = request.getfuncargvalue('minion') - platform = minion['container'].get_os_release()['ID'] + os_release = minion['container'].get_os_release() + platform = os_release.get('ID', 'sles') action = marker.kwargs.get('action', 'skip') if platform != expected and action == 'skip': pytest.skip('skipped on this platform: {}'.format(platform))
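Editor's note: the fix leans on dict.get's default argument -- os-release data on SLES releases before 12 has no ID field, so the lookup falls back to 'sles' instead of raising KeyError. A minimal illustration (the dict contents are assumed):

os_release = {'NAME': 'SLES', 'VERSION': '11.4'}  # no 'ID' key on sles < 12
platform = os_release.get('ID', 'sles')
assert platform == 'sles'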
c5a0f1f26d2527f49ec00d842ffd56be5f0a9965
detect/findIgnoreLists.py
detect/findIgnoreLists.py
#!/usr/bin/env python import os THIS_SCRIPT_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) THIS_REPO_PATH = os.path.abspath(os.path.join(THIS_SCRIPT_DIRECTORY, os.pardir)) REPO_PARENT_PATH = os.path.abspath(os.path.join(THIS_SCRIPT_DIRECTORY, os.pardir, os.pardir)) # Lets us combine ignore lists: # from private&public fuzzing repos # for project branches and for their base branches (e.g. mozilla-central) # # Given a targetRepo "mozilla-central/ionmonkey" and a name "crashes.txt", returns a list of 2N absolute paths like: # ???/funfuzz*/known/mozilla-central/ionmonkey/crashes.txt # ???/funfuzz*/known/mozilla-central/crashes.txt def findIgnoreLists(targetRepo, needle): r = [] assert not targetRepo.startswith("/") for name in sorted(os.listdir(REPO_PARENT_PATH)): if name.startswith("fuzzing") or name.startswith("funfuzz"): knownPath = os.path.join(REPO_PARENT_PATH, name, "known", targetRepo) if os.path.isdir(knownPath): while os.path.basename(knownPath) != "known": filename = os.path.join(knownPath, needle) if os.path.exists(filename): r.append(filename) knownPath = os.path.dirname(knownPath) assert len(r) > 0 return r
#!/usr/bin/env python import os THIS_SCRIPT_DIRECTORY = os.path.dirname(os.path.abspath(__file__)) THIS_REPO_PATH = os.path.abspath(os.path.join(THIS_SCRIPT_DIRECTORY, os.pardir)) REPO_PARENT_PATH = os.path.abspath(os.path.join(THIS_SCRIPT_DIRECTORY, os.pardir, os.pardir)) # Lets us combine ignore lists: # from private&public fuzzing repos # for project branches and for their base branches (e.g. mozilla-central) # # Given a targetRepo "mozilla-central/ionmonkey" and a name "crashes.txt", returns a list of 2N absolute paths like: # ???/funfuzz*/known/mozilla-central/ionmonkey/crashes.txt # ???/funfuzz*/known/mozilla-central/crashes.txt def findIgnoreLists(targetRepo, needle): r = [] assert not targetRepo.startswith("/") for name in sorted(os.listdir(REPO_PARENT_PATH)): if name.startswith("funfuzz"): knownPath = os.path.join(REPO_PARENT_PATH, name, "known", targetRepo) if os.path.isdir(knownPath): while os.path.basename(knownPath) != "known": filename = os.path.join(knownPath, needle) if os.path.exists(filename): r.append(filename) knownPath = os.path.dirname(knownPath) assert len(r) > 0 return r
Stop looking in the old fuzzing/ directory
Stop looking in the old fuzzing/ directory
Python
mpl-2.0
nth10sd/funfuzz,MozillaSecurity/funfuzz,MozillaSecurity/funfuzz,nth10sd/funfuzz,MozillaSecurity/funfuzz,nth10sd/funfuzz
--- +++ @@ -19,7 +19,7 @@ r = [] assert not targetRepo.startswith("/") for name in sorted(os.listdir(REPO_PARENT_PATH)): - if name.startswith("fuzzing") or name.startswith("funfuzz"): + if name.startswith("funfuzz"): knownPath = os.path.join(REPO_PARENT_PATH, name, "known", targetRepo) if os.path.isdir(knownPath): while os.path.basename(knownPath) != "known":
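Editor's note: a hedged usage sketch of the function above; the on-disk layout is an assumption built from the docstring, and the call would assert-fail if no matching ignore file existed:

# given ../funfuzz-private/known/mozilla-central/ionmonkey/crashes.txt
# and   ../funfuzz/known/mozilla-central/crashes.txt on disk:
paths = findIgnoreLists('mozilla-central/ionmonkey', 'crashes.txt')
for p in paths:
    print(p)  # one absolute path per ignore file found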
f182bb2dd76483891148476b14ccad08a596b080
tests/test_lesson_4_temperature.py
tests/test_lesson_4_temperature.py
import unittest from lessons.lesson_4_temperature import temperature class CtoFTestCase(unittest.TestCase): def test_handles_freezing_point(self): freezing = temperature.c_to_f(0) self.assertEqual(freezing, 32) def test_handles_boiling_point(self): boiling = temperature.c_to_f(100) self.assertEqual(boiling, 212) def test_handle_room_temp(self): arbitrary = temperature.c_to_f(20) self.assertEqual(arbitrary, 68) def test_handles_body_temp(self): body_temp = temperature.c_to_f(37) self.assertEqual(body_temp, 98) class FtoCTestCase(unittest.TestCase): def test_handles_freezing_point(self): freezing = temperature.f_to_c(32) self.assertEqual(freezing, 0) def test_handles_boiling_point(self): boiling = temperature.f_to_c(212) self.assertEqual(boiling, 100) def test_handles_arbitrary_temp(self): arbitrary = temperature.f_to_c(68) self.assertEqual(arbitrary, 20) def test_handles_body_temp(self): body_temp = temperature.f_to_c(98.6) self.assertEqual(body_temp, 37)
Add tests for temperature conversions in lesson 4.
Add tests for temperature conversions in lesson 4.
Python
mit
thejessleigh/test_driven_python,thejessleigh/test_driven_python,thejessleigh/test_driven_python
--- +++ @@ -0,0 +1,39 @@ +import unittest + +from lessons.lesson_4_temperature import temperature + + +class CtoFTestCase(unittest.TestCase): + def test_handles_freezing_point(self): + freezing = temperature.c_to_f(0) + self.assertEqual(freezing, 32) + + def test_handles_boiling_point(self): + boiling = temperature.c_to_f(100) + self.assertEqual(boiling, 212) + + def test_handle_room_temp(self): + arbitrary = temperature.c_to_f(20) + self.assertEqual(arbitrary, 68) + + def test_handles_body_temp(self): + body_temp = temperature.c_to_f(37) + self.assertEqual(body_temp, 98) + + +class FtoCTestCase(unittest.TestCase): + def test_handles_freezing_point(self): + freezing = temperature.f_to_c(32) + self.assertEqual(freezing, 0) + + def test_handles_boiling_point(self): + boiling = temperature.f_to_c(212) + self.assertEqual(boiling, 100) + + def test_handles_arbitrary_temp(self): + arbitrary = temperature.f_to_c(68) + self.assertEqual(arbitrary, 20) + + def test_handles_body_temp(self): + body_temp = temperature.f_to_c(98.6) + self.assertEqual(body_temp, 37)
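Editor's note: one implementation consistent with every assertion above -- the 37 C -> 98 F case implies truncation on the Celsius side, while 98.6 F -> 37 C implies rounding on the way back. This is a sketch inferred from the tests, not the repository's actual module:

def c_to_f(celsius):
    # int() truncates, so c_to_f(37) -> int(98.6) == 98 as the test expects
    return int(celsius * 9 / 5 + 32)

def f_to_c(fahrenheit):
    # round() absorbs float error, so f_to_c(98.6) -> 37 exactly
    return round((fahrenheit - 32) * 5 / 9)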
72c00dca2c8310744c424296d8f712909bc95b95
infosystem/subsystem/capability/entity.py
infosystem/subsystem/capability/entity.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import UniqueConstraint from infosystem.common.subsystem import entity from infosystem.database import db class Capability(entity.Entity, db.Model): attributes = ['id', 'name', 'url', 'method'] name = db.Column(db.String(30), nullable=False) url = db.Column(db.String(100), nullable=False) method = db.Column(db.String(10), nullable=False) UniqueConstraint('url', 'method', name='capability_uk') def __init__(self, id, name, url, method): super(Capability, self).__init__(id) self.name = name self.url = url self.method = method
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import UniqueConstraint from infosystem.common.subsystem import entity from infosystem.database import db class Capability(entity.Entity, db.Model): attributes = ['id', 'name', 'url', 'method'] name = db.Column(db.String(30), nullable=False) url = db.Column(db.String(100), nullable=False) method = db.Column(db.String(10), nullable=False) __table_args__ = (UniqueConstraint('url', 'method', name='capability_uk'),) def __init__(self, id, name, url, method): super(Capability, self).__init__(id) self.name = name self.url = url self.method = method
Fix unique constraint in capability
Fix unique constraint in capability
Python
apache-2.0
samueldmq/infosystem
--- +++ @@ -21,7 +21,7 @@ name = db.Column(db.String(30), nullable=False) url = db.Column(db.String(100), nullable=False) method = db.Column(db.String(10), nullable=False) - UniqueConstraint('url', 'method', name='capability_uk') + __table_args__ = (UniqueConstraint('url', 'method', name='capability_uk'),) def __init__(self, id, name, url, method): super(Capability, self).__init__(id)
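Editor's note on the bug being fixed: a bare UniqueConstraint(...) expression in a declarative class body is evaluated and then discarded -- SQLAlchemy only collects table-level constraints from __table_args__. A minimal standalone sketch:

from sqlalchemy import Column, Integer, String, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Capability(Base):
    __tablename__ = 'capability'
    id = Column(Integer, primary_key=True)
    url = Column(String(100), nullable=False)
    method = Column(String(10), nullable=False)
    # the trailing comma makes this a tuple, which __table_args__ requires
    __table_args__ = (UniqueConstraint('url', 'method', name='capability_uk'),)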
93c6a89bd6f0d8ff9a32e37d4f6e9c4ed0aa3f8f
openedx/core/djangoapps/schedules/apps.py
openedx/core/djangoapps/schedules/apps.py
from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _ class SchedulesConfig(AppConfig): name = 'openedx.core.djangoapps.schedules' verbose_name = _('Schedules') def ready(self): # noinspection PyUnresolvedReferences from . import signals, tasks # pylint: disable=unused-variable
from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _ class SchedulesConfig(AppConfig): name = 'openedx.core.djangoapps.schedules' verbose_name = _('Schedules') plugin_app = {} def ready(self): # noinspection PyUnresolvedReferences from . import signals, tasks # pylint: disable=unused-variable
Update schedules app to be a Django App Plugin
Update schedules app to be a Django App Plugin
Python
agpl-3.0
EDUlib/edx-platform,ahmedaljazzar/edx-platform,procangroup/edx-platform,gsehub/edx-platform,edx/edx-platform,stvstnfrd/edx-platform,teltek/edx-platform,ahmedaljazzar/edx-platform,angelapper/edx-platform,EDUlib/edx-platform,ahmedaljazzar/edx-platform,kmoocdev2/edx-platform,appsembler/edx-platform,msegado/edx-platform,cpennington/edx-platform,teltek/edx-platform,edx-solutions/edx-platform,hastexo/edx-platform,jolyonb/edx-platform,msegado/edx-platform,edx-solutions/edx-platform,procangroup/edx-platform,procangroup/edx-platform,stvstnfrd/edx-platform,mitocw/edx-platform,cpennington/edx-platform,Edraak/edraak-platform,Edraak/edraak-platform,procangroup/edx-platform,BehavioralInsightsTeam/edx-platform,ESOedX/edx-platform,gymnasium/edx-platform,msegado/edx-platform,gsehub/edx-platform,edx/edx-platform,proversity-org/edx-platform,teltek/edx-platform,proversity-org/edx-platform,philanthropy-u/edx-platform,eduNEXT/edunext-platform,edx/edx-platform,eduNEXT/edunext-platform,msegado/edx-platform,gsehub/edx-platform,jolyonb/edx-platform,mitocw/edx-platform,cpennington/edx-platform,cpennington/edx-platform,CredoReference/edx-platform,eduNEXT/edx-platform,a-parhom/edx-platform,stvstnfrd/edx-platform,edx-solutions/edx-platform,Stanford-Online/edx-platform,teltek/edx-platform,jolyonb/edx-platform,eduNEXT/edunext-platform,Stanford-Online/edx-platform,gymnasium/edx-platform,a-parhom/edx-platform,appsembler/edx-platform,stvstnfrd/edx-platform,eduNEXT/edx-platform,kmoocdev2/edx-platform,CredoReference/edx-platform,BehavioralInsightsTeam/edx-platform,edx/edx-platform,Stanford-Online/edx-platform,arbrandes/edx-platform,arbrandes/edx-platform,a-parhom/edx-platform,edx-solutions/edx-platform,eduNEXT/edx-platform,TeachAtTUM/edx-platform,CredoReference/edx-platform,msegado/edx-platform,ESOedX/edx-platform,kmoocdev2/edx-platform,gymnasium/edx-platform,ESOedX/edx-platform,jolyonb/edx-platform,angelapper/edx-platform,kmoocdev2/edx-platform,ahmedaljazzar/edx-platform,TeachAtTUM/edx-platform,Stanford-Online/edx-platform,mitocw/edx-platform,eduNEXT/edx-platform,hastexo/edx-platform,arbrandes/edx-platform,EDUlib/edx-platform,angelapper/edx-platform,Edraak/edraak-platform,kmoocdev2/edx-platform,hastexo/edx-platform,philanthropy-u/edx-platform,appsembler/edx-platform,eduNEXT/edunext-platform,angelapper/edx-platform,gymnasium/edx-platform,Edraak/edraak-platform,BehavioralInsightsTeam/edx-platform,arbrandes/edx-platform,EDUlib/edx-platform,CredoReference/edx-platform,proversity-org/edx-platform,TeachAtTUM/edx-platform,gsehub/edx-platform,a-parhom/edx-platform,ESOedX/edx-platform,BehavioralInsightsTeam/edx-platform,mitocw/edx-platform,philanthropy-u/edx-platform,philanthropy-u/edx-platform,hastexo/edx-platform,TeachAtTUM/edx-platform,proversity-org/edx-platform,appsembler/edx-platform
--- +++ @@ -6,6 +6,8 @@ name = 'openedx.core.djangoapps.schedules' verbose_name = _('Schedules') + plugin_app = {} + def ready(self): # noinspection PyUnresolvedReferences from . import signals, tasks # pylint: disable=unused-variable
ca6a758f525c741f277e7e7be115b5b9d20fa5c1
openxc/tools/dump.py
openxc/tools/dump.py
""" This module contains the methods for the ``openxc-dump`` command line program. `main` is executed when ``openxc-dump`` is run, and all other callables in this module are internal only. """ from __future__ import absolute_import import argparse import time from openxc.formats.json import JsonFormatter from .common import device_options, configure_logging, select_device def receive(message, **kwargs): message['timestamp'] = time.time() # TODO update docs on trace file format print(JsonFormatter.serialize(message)) def parse_options(): parser = argparse.ArgumentParser( description="View a raw OpenXC data stream", parents=[device_options()]) parser.add_argument("--corrupted", action="store_true", dest="show_corrupted", default=False, help="don't suppress corrupted messages from output") arguments = parser.parse_args() return arguments def main(): configure_logging() arguments = parse_options() source_class, source_kwargs = select_device(arguments) source = source_class(receive, **source_kwargs) source.start() while True: import time time.sleep(5)
""" This module contains the methods for the ``openxc-dump`` command line program. `main` is executed when ``openxc-dump`` is run, and all other callables in this module are internal only. """ from __future__ import absolute_import import argparse import time from openxc.formats.json import JsonFormatter from .common import device_options, configure_logging, select_device def receive(message, **kwargs): message['timestamp'] = time.time() print(JsonFormatter.serialize(message)) def parse_options(): parser = argparse.ArgumentParser( description="View a raw OpenXC data stream", parents=[device_options()]) parser.add_argument("--corrupted", action="store_true", dest="show_corrupted", default=False, help="don't suppress corrupted messages from output") arguments = parser.parse_args() return arguments def main(): configure_logging() arguments = parse_options() source_class, source_kwargs = select_device(arguments) source = source_class(receive, **source_kwargs) source.start() while True: import time time.sleep(5)
Remove a resolved TODO - trace file formats now standardized.
Remove a resolved TODO - trace file formats now standardized.
Python
bsd-3-clause
openxc/openxc-python,openxc/openxc-python,openxc/openxc-python
--- +++ @@ -14,7 +14,6 @@ def receive(message, **kwargs): message['timestamp'] = time.time() - # TODO update docs on trace file format print(JsonFormatter.serialize(message))
b07037277b28f80aaa6d6f74d6f79d4146b5ee23
oshi_rest_server/oshi_rest_server/urls.py
oshi_rest_server/oshi_rest_server/urls.py
from django.conf.urls import patterns, include, url from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', # Examples: # url(r'^$', 'oshi_rest_server.views.home', name='home'), # url(r'^blog/', include('blog.urls')), url(r'^admin/', include(admin.site.urls)), )
from django.conf.urls import patterns, include, url from django.contrib import admin from rest_framework import routers admin.autodiscover() urlpatterns = patterns('', url(r'^', include(routers.urls)), url(r'^admin/', include(admin.site.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')) )
Configure Django REST framework URLs
Configure Django REST framework URLs
Python
isc
ferrarimarco/OSHI-REST-server,ferrarimarco/OSHI-REST-server
--- +++ @@ -1,12 +1,12 @@ from django.conf.urls import patterns, include, url from django.contrib import admin +from rest_framework import routers + admin.autodiscover() urlpatterns = patterns('', - # Examples: - # url(r'^$', 'oshi_rest_server.views.home', name='home'), - # url(r'^blog/', include('blog.urls')), - - url(r'^admin/', include(admin.site.urls)), + url(r'^', include(routers.urls)), + url(r'^admin/', include(admin.site.urls)), + url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')) )
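Editor's note: as committed, include(routers.urls) references the rest_framework.routers module itself rather than a router instance. Typical DRF setups instantiate a router and register viewsets, roughly as below (the viewset is hypothetical, and the list-style urlpatterns is a later-Django idiom than the patterns() call used here):

from django.conf.urls import include, url
from rest_framework import routers

router = routers.DefaultRouter()
# router.register(r'users', UserViewSet)  # hypothetical viewset

urlpatterns = [
    url(r'^', include(router.urls)),
]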
72e509be8415e628613b2341c136018ff5bb0f44
openpathsampling/engines/openmm/__init__.py
openpathsampling/engines/openmm/__init__.py
def missing_openmm(*args, **kwargs): raise RuntimeError("Install OpenMM to use this feature") try: import simtk.openmm import simtk.openmm.app except ImportError: HAS_OPENMM = False Engine = missing_openmm empty_snapshot_from_openmm_topology = missing_openmm snapshot_from_pdb = missing_openmm snapshot_from_testsystem = missing_openmm to_openmm_topology = missing_openmm trajectory_from_mdtraj = missing_openmm trajectory_to_mdtraj = missing_openmm Snapshot = missing_openmm MDSnapshot = missing_openmm else: from .engine import OpenMMEngine as Engine from .tools import ( empty_snapshot_from_openmm_topology, snapshot_from_pdb, snapshot_from_testsystem, to_openmm_topology, trajectory_from_mdtraj, trajectory_to_mdtraj ) from . import features from .snapshot import Snapshot, MDSnapshot from openpathsampling.engines import NoEngine, SnapshotDescriptor
def missing_openmm(*args, **kwargs): raise RuntimeError("Install OpenMM to use this feature") try: import simtk.openmm import simtk.openmm.app except ImportError: HAS_OPENMM = False Engine = missing_openmm empty_snapshot_from_openmm_topology = missing_openmm snapshot_from_pdb = missing_openmm snapshot_from_testsystem = missing_openmm to_openmm_topology = missing_openmm trajectory_from_mdtraj = missing_openmm trajectory_to_mdtraj = missing_openmm Snapshot = missing_openmm MDSnapshot = missing_openmm else: from .engine import OpenMMEngine as Engine from .tools import ( empty_snapshot_from_openmm_topology, snapshot_from_pdb, snapshot_from_testsystem, to_openmm_topology, trajectory_from_mdtraj, trajectory_to_mdtraj ) from . import features from .snapshot import Snapshot, MDSnapshot from . import topology from openpathsampling.engines import NoEngine, SnapshotDescriptor
Fix backward compatibility for MDTrajTopology
Fix backward compatibility for MDTrajTopology
Python
mit
openpathsampling/openpathsampling,dwhswenson/openpathsampling,openpathsampling/openpathsampling,choderalab/openpathsampling,openpathsampling/openpathsampling,choderalab/openpathsampling,dwhswenson/openpathsampling,dwhswenson/openpathsampling,choderalab/openpathsampling,openpathsampling/openpathsampling,dwhswenson/openpathsampling
--- +++ @@ -29,5 +29,6 @@ from . import features from .snapshot import Snapshot, MDSnapshot + from . import topology from openpathsampling.engines import NoEngine, SnapshotDescriptor
f3be5184dfccdcbbf5b95c7fcd75fbbda8d2ce1c
packages/grid/backend/grid/core/security.py
packages/grid/backend/grid/core/security.py
# stdlib from datetime import datetime from datetime import timedelta from typing import Any from typing import Optional from typing import Union # third party from jose import jwt from passlib.context import CryptContext # grid absolute from grid.core.config import settings pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") ALGORITHM = "HS256" def create_access_token( subject: Union[str, Any], expires_delta: Optional[timedelta] = None ) -> str: if expires_delta: expire = datetime.utcnow() + expires_delta else: expire = datetime.utcnow() + timedelta( minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES ) to_encode = {"exp": expire, "sub": str(subject)} encoded_jwt = jwt.encode(to_encode, settings.SECRET_KEY, algorithm=ALGORITHM) return encoded_jwt def verify_password(plain_password: str, hashed_password: str) -> bool: return pwd_context.verify(plain_password, hashed_password) def get_password_hash(password: str) -> str: return pwd_context.hash(password)
# stdlib from datetime import datetime from datetime import timedelta from typing import Any from typing import Optional from typing import Union # third party from jose import jwt from passlib.context import CryptContext # grid absolute from grid.core.config import settings pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") ALGORITHM = "HS256" def create_access_token( subject: Union[str, Any], expires_delta: Optional[timedelta] = None, guest: bool = False, ) -> str: if expires_delta: expire = datetime.utcnow() + expires_delta else: expire = datetime.utcnow() + timedelta( minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES ) to_encode = {"exp": expire, "sub": str(subject), "guest": guest} encoded_jwt = jwt.encode(to_encode, settings.SECRET_KEY, algorithm=ALGORITHM) return encoded_jwt def verify_password(plain_password: str, hashed_password: str) -> bool: return pwd_context.verify(plain_password, hashed_password) def get_password_hash(password: str) -> str: return pwd_context.hash(password)
UPDATE create_access_token to handle guest sessions
UPDATE create_access_token to handle guest sessions
Python
apache-2.0
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
--- +++ @@ -19,7 +19,9 @@ def create_access_token( - subject: Union[str, Any], expires_delta: Optional[timedelta] = None + subject: Union[str, Any], + expires_delta: Optional[timedelta] = None, + guest: bool = False, ) -> str: if expires_delta: expire = datetime.utcnow() + expires_delta @@ -27,7 +29,7 @@ expire = datetime.utcnow() + timedelta( minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES ) - to_encode = {"exp": expire, "sub": str(subject)} + to_encode = {"exp": expire, "sub": str(subject), "guest": guest} encoded_jwt = jwt.encode(to_encode, settings.SECRET_KEY, algorithm=ALGORITHM) return encoded_jwt
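Editor's note: a sketch of how a consumer might branch on the new claim, decoding with python-jose under the assumption that the same SECRET_KEY and ALGORITHM from this module are in scope:

from jose import jwt

token = create_access_token(subject='user-42', guest=True)
payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[ALGORITHM])
if payload.get('guest'):
    # e.g. restrict to read-only endpoints, skip account lookups
    pass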
ef41e90cf49856a6d0ca1b363440edb542dd2e0d
tests/test_config.py
tests/test_config.py
# Copyright 2015-2016 Masayuki Yamamoto # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest of configuration loading.""" import unittest class TestUserPath(unittest.TestCase): """Test for yanico.config.user_path()."""
# Copyright 2015-2016 Masayuki Yamamoto # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest of configuration loading.""" import os import unittest from unittest import mock from yanico import config class TestUserPath(unittest.TestCase): """Test for yanico.config.user_path().""" @mock.patch.dict(os.environ, {'HOME': 'spam'}) def test_path(self): """Expect filepath joinning '.yanico.conf' under $HOME.""" if os.sep == '\\': expect = 'spam\\.yanico.conf' elif os.sep == '/': expect = 'spam/.yanico.conf' result = config.user_path() self.assertEqual(result, expect)
Add test case for user path
Add test case for user path. Expect filepath joining '.yanico.conf' under $HOME.
Python
apache-2.0
ma8ma/yanico
--- +++ @@ -13,8 +13,22 @@ # limitations under the License. """Unittest of configuration loading.""" +import os import unittest +from unittest import mock + +from yanico import config class TestUserPath(unittest.TestCase): """Test for yanico.config.user_path().""" + + @mock.patch.dict(os.environ, {'HOME': 'spam'}) + def test_path(self): + """Expect filepath joinning '.yanico.conf' under $HOME.""" + if os.sep == '\\': + expect = 'spam\\.yanico.conf' + elif os.sep == '/': + expect = 'spam/.yanico.conf' + result = config.user_path() + self.assertEqual(result, expect)
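Editor's note: mock.patch.dict saves and restores os.environ around the decorated test, so the fake HOME never leaks into other tests; the same pattern works as a context manager:

import os
from unittest import mock

with mock.patch.dict(os.environ, {'HOME': 'spam'}):
    assert os.environ['HOME'] == 'spam'
# the original HOME value is restored here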
b910900f72d6b25cb05c56563968aad102429c25
gmrh/RepositoryHandler.py
gmrh/RepositoryHandler.py
import os.path import subprocess class DefaultRepositoryHandler(): def __init__(self, cwd): self.cwd = cwd def repository_exists(self, path): return os.path.exists(path + os.path.sep + '.git') def update_repository(self, path, remote_url, remote_branch): print 'Updating repository %s ...' % path subprocess.check_call(['git', 'pull', '--rebase']) def clone_repository(self, path, remote_url, remote_branch): print 'Cloning repository %s ...' % path subprocess.check_call(['git', 'clone', remote_url, '-b', remote_branch, path]) def update_or_clone(self, path, remote_url, remote_branch): if self.repository_exists(path): self.update_repository(path, remote_url, remote_branch) else: self.clone_repository(path, remote_url, remote_branch)
import os.path import subprocess class DefaultRepositoryHandler(): def __init__(self, cwd): self.cwd = cwd def repository_exists(self, path): return os.path.exists(path + os.path.sep + '.git') def update_repository(self, path, remote_url, remote_branch): print 'Updating repository %s ...' % path remote_name = 'origin' url = subprocess.check_output(['git', 'config', 'remote.%s.url' % remote_name], cwd=path) if url != remote_url: subprocess.check_call(['git', 'config', 'remote.%s.url' % remote_name, remote_url], cwd=path) subprocess.check_call(['git', 'pull', '--rebase'], cwd=path) def clone_repository(self, path, remote_url, remote_branch): print 'Cloning repository %s ...' % path subprocess.check_call(['git', 'clone', remote_url, '-b', remote_branch, path]) def update_or_clone(self, path, remote_url, remote_branch): if self.repository_exists(path): self.update_repository(path, remote_url, remote_branch) else: self.clone_repository(path, remote_url, remote_branch)
Check the URL of the remote & ensure the updating commands are executed in the correct directory
Check the URL of the remote & ensure the updating commands are executed in the correct directory
Python
bsd-3-clause
solarnz/polygamy,solarnz/polygamy
--- +++ @@ -11,7 +11,14 @@ def update_repository(self, path, remote_url, remote_branch): print 'Updating repository %s ...' % path - subprocess.check_call(['git', 'pull', '--rebase']) + + remote_name = 'origin' + + url = subprocess.check_output(['git', 'config', 'remote.%s.url' % remote_name], cwd=path) + if url != remote_url: + subprocess.check_call(['git', 'config', 'remote.%s.url' % remote_name, remote_url], cwd=path) + + subprocess.check_call(['git', 'pull', '--rebase'], cwd=path) def clone_repository(self, path, remote_url, remote_branch): print 'Cloning repository %s ...' % path
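Editor's note: one caveat in the committed code -- check_output returns the config value with a trailing newline, so the url != remote_url comparison is effectively always true and git config gets rewritten on every update. A stripped variant (placeholders for path and URL; str comparison as in the module's Python 2):

import subprocess

path, remote_url = 'repo', 'git@example.com:foo.git'  # placeholders
url = subprocess.check_output(
    ['git', 'config', 'remote.origin.url'], cwd=path).strip()
if url != remote_url:
    subprocess.check_call(
        ['git', 'config', 'remote.origin.url', remote_url], cwd=path)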
5a1c48403a912eedc4f5d87215fafdb05eb49ed5
Python/find-all-duplicates-in-an-array.py
Python/find-all-duplicates-in-an-array.py
# Time: O(n) # Space: O(1) # Given an array of integers, 1 <= a[i] <= n (n = size of array), # some elements appear twice and others appear once. # Find all the elements that appear twice in this array. # Could you do it without extra space and in O(n) runtime? # # Example: # Input # # [4,3,2,7,8,2,3,1] # # Output # # [2,3] class Solution(object): def findDuplicates(self, nums): """ :type nums: List[int] :rtype: List[int] """ result = [] i = 0 while i < len(nums): if nums[i] != nums[nums[i]-1]: nums[nums[i]-1], nums[i] = nums[i], nums[nums[i]-1] else: i += 1 for i in xrange(len(nums)): if i != nums[i]-1: result.append(nums[i]) return result
# Time: O(n) # Space: O(1) # Given an array of integers, 1 <= a[i] <= n (n = size of array), # some elements appear twice and others appear once. # Find all the elements that appear twice in this array. # Could you do it without extra space and in O(n) runtime? # # Example: # Input # # [4,3,2,7,8,2,3,1] # # Output # # [2,3] class Solution(object): def findDuplicates(self, nums): """ :type nums: List[int] :rtype: List[int] """ result = [] i = 0 while i < len(nums): if nums[i] != nums[nums[i]-1]: nums[nums[i]-1], nums[i] = nums[i], nums[nums[i]-1] else: i += 1 for i in xrange(len(nums)): if i != nums[i]-1: result.append(nums[i]) return result from collections import Counter class Solution2(object): def findDuplicates(self, nums): """ :type nums: List[int] :rtype: List[int] """ return [elem for elem, count in Counter(nums).items() if count == 2]
Add alternative solution for 'Find all duplicates in an array'
Add alternative solution for 'Find all duplicates in an array'
Python
mit
kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015
--- +++ @@ -33,3 +33,15 @@ if i != nums[i]-1: result.append(nums[i]) return result + + +from collections import Counter + + +class Solution2(object): + def findDuplicates(self, nums): + """ + :type nums: List[int] + :rtype: List[int] + """ + return [elem for elem, count in Counter(nums).items() if count == 2]
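Editor's note: usage with the example input from the docstring. Counter imposes no particular order, so either solution may return the duplicates as [2, 3] or [3, 2]; note also that the in-place cyclic-swap Solution mutates its input list, while the Counter-based Solution2 leaves it untouched:

nums = [4, 3, 2, 7, 8, 2, 3, 1]
print(Solution().findDuplicates(list(nums)))  # duplicates 2 and 3; copies to avoid mutation
print(Solution2().findDuplicates(nums))       # same duplicates via Counter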
3f5e90a19881884408003ed01b04b4a29b3bd2fd
test/command_source/TestCommandSource.py
test/command_source/TestCommandSource.py
""" Test that lldb command "command source" works correctly. See also http://llvm.org/viewvc/llvm-project?view=rev&revision=109673. """ import os, time import unittest2 import lldb from lldbtest import * class CommandSourceTestCase(TestBase): mydir = "command_source" def test_command_source(self): """Test that lldb command "command source" works correctly.""" # Sourcing .lldb in the current working directory, which in turn imports # the "my" package that defines the date() function. self.runCmd("command source .lldb") # Let's temporarily redirect the stdout to our StringIO session object # in order to capture the script evaluation output. old_stdout = sys.stdout session = StringIO.StringIO() sys.stdout = session # Python should evaluate "my.date()" successfully. self.runCmd("script my.date()") # Now restore stdout to the way we were. :-) sys.stdout = old_stdout import datetime self.expect(session.getvalue(), "script my.date() runs successfully", exe=False, substrs = [str(datetime.date.today())]) if __name__ == '__main__': import atexit lldb.SBDebugger.Initialize() atexit.register(lambda: lldb.SBDebugger.Terminate()) unittest2.main()
""" Test that lldb command "command source" works correctly. See also http://llvm.org/viewvc/llvm-project?view=rev&revision=109673. """ import os, sys import unittest2 import lldb from lldbtest import * class CommandSourceTestCase(TestBase): mydir = "command_source" def test_command_source(self): """Test that lldb command "command source" works correctly.""" # Sourcing .lldb in the current working directory, which in turn imports # the "my" package that defines the date() function. self.runCmd("command source .lldb") # Let's temporarily redirect the stdout to our StringIO session object # in order to capture the script evaluation output. old_stdout = sys.stdout session = StringIO.StringIO() sys.stdout = session # Python should evaluate "my.date()" successfully. self.runCmd("script my.date()") # Now restore stdout to the way we were. :-) sys.stdout = old_stdout import datetime self.expect(session.getvalue(), "script my.date() runs successfully", exe=False, substrs = [str(datetime.date.today())]) if __name__ == '__main__': import atexit lldb.SBDebugger.Initialize() atexit.register(lambda: lldb.SBDebugger.Terminate()) unittest2.main()
Add "import sys" for sys.stdout.
Add "import sys" for sys.stdout. git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@124504 91177308-0d34-0410-b5e6-96231b3b80d8
Python
apache-2.0
apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb
--- +++ @@ -4,7 +4,7 @@ See also http://llvm.org/viewvc/llvm-project?view=rev&revision=109673. """ -import os, time +import os, sys import unittest2 import lldb from lldbtest import *
73243dc74b416fc42ccb8b684924f0b2a09919a3
SnakesLadders/SnakesLadders.py
SnakesLadders/SnakesLadders.py
class State(object): def __init__(self, ix): self.index = ix self.link = None # placeholder, not None if Snake or Ladder def process(self): """Action when landed upon""" if self.link: if self.link > self.index: # Ladder! return self.link else: # Snake! return self.link else: # link is None: "Normal" = not a snake or ladder return self.index class GameFSM(object): def __init__(self, n): self.all_states = [] for ix in range(n+1): self.all_states.append(State(ix)) game = GameFSM(16) print(game.all_states) # Ladders game.all_states[2].link = 10 game.all_states[8].link = 14
class State(object): def __init__(self, ix): self.index = ix self.link = None # placeholder, not None if Snake or Ladder def process(self): """Action when landed upon""" if self.link: if self.link > self.index: # Ladder! return self.link else: # Snake! return self.link else: # link is None: "Normal" = not a snake or ladder return self.index class GameFSM(object): def __init__(self, n): self.all_states = [] self.position = 0 self.n = n for ix in range(n+1): self.all_states.append(State(ix)) def move(self, die): """die is an integer """ inter_pos = self.position + die state_obj = self.all_states[inter_pos] final_pos = state_obj.process() self.position = final_pos def run(self): print("Starting game!") while self.position < self.n: # roll die die = roll() # move based on die roll self.move(die) # record results print("Game over!") game = GameFSM(16) print(game.all_states) # Ladders game.all_states[2].link = 10 game.all_states[8].link = 14
Update to move and run
Update to move and run
Python
cc0-1.0
robclewley/DataScotties
--- +++ @@ -19,9 +19,29 @@ class GameFSM(object): def __init__(self, n): self.all_states = [] + self.position = 0 + self.n = n for ix in range(n+1): self.all_states.append(State(ix)) - + + def move(self, die): + """die is an integer + """ + inter_pos = self.position + die + state_obj = self.all_states[inter_pos] + final_pos = state_obj.process() + self.position = final_pos + + def run(self): + print("Starting game!") + while self.position < self.n: + # roll die + die = roll() + # move based on die roll + self.move(die) + # record results + print("Game over!") + game = GameFSM(16) print(game.all_states)
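Editor's note: run() calls a roll() helper that this module never defines, so the loop would raise NameError as committed; a minimal die, stated as an assumption rather than the author's missing code (note also that move() can index past the end of all_states when a roll overshoots square n):

import random

def roll(sides=6):
    # fair single die; the FSM only needs some integer step
    return random.randint(1, sides)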
a4e959c1aeb705128898f07bdf9c9fb315ba593c
flexget/tests/test_plugin_interfaces.py
flexget/tests/test_plugin_interfaces.py
from __future__ import unicode_literals, division, absolute_import from builtins import * # noqa pylint: disable=unused-import, redefined-builtin from flexget import plugin class TestInterfaces(object): """Test that any plugins declaring certain interfaces at least superficially comply with those interfaces.""" def test_task_interface(self): plugin.load_plugins() task_plugins = plugin.get_plugins(interface='task') for p in task_plugins: assert isinstance(p.schema, dict), 'Task interface requires a schema to be defined.' assert p.phase_handlers, 'Task plugins should have at least on phase handler (on_task_X) method.' def test_list_interface(self): plugin.load_plugins() task_plugins = plugin.get_plugins(interface='list') for p in task_plugins: assert isinstance(p.schema, dict), 'List interface requires a schema to be defined.' assert hasattr(p.instance, 'get_list'), 'List plugins must implement a get_list method.'
from __future__ import unicode_literals, division, absolute_import from builtins import * # noqa pylint: disable=unused-import, redefined-builtin from flexget import plugin class TestInterfaces(object): """Test that any plugins declaring certain interfaces at least superficially comply with those interfaces.""" def get_plugins(self, interface): plugins = list(plugin.get_plugins(interface=interface)) assert plugins, 'No plugins for this interface found.' return plugins def test_task_interface(self): for p in self.get_plugins('task'): assert isinstance(p.schema, dict), 'Task interface requires a schema to be defined.' assert p.phase_handlers, 'Task plugins should have at least on phase handler (on_task_X) method.' def test_list_interface(self): for p in self.get_plugins('list'): assert isinstance(p.schema, dict), 'List interface requires a schema to be defined.' assert hasattr(p.instance, 'get_list'), 'List plugins must implement a get_list method.' def test_search_interface(self): for p in self.get_plugins('search'): assert isinstance(p.schema, dict), 'Search interface requires a schema to be defined.' assert hasattr(p.instance, 'search'), 'Search plugins must implement a search method.'
Add tests for search interface plugins
Add tests for search interface plugins
Python
mit
malkavi/Flexget,tobinjt/Flexget,jacobmetrick/Flexget,jawilson/Flexget,tobinjt/Flexget,LynxyssCZ/Flexget,malkavi/Flexget,qk4l/Flexget,OmgOhnoes/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,crawln45/Flexget,jacobmetrick/Flexget,jawilson/Flexget,jacobmetrick/Flexget,tobinjt/Flexget,poulpito/Flexget,JorisDeRieck/Flexget,qk4l/Flexget,ianstalk/Flexget,ianstalk/Flexget,Flexget/Flexget,LynxyssCZ/Flexget,sean797/Flexget,qk4l/Flexget,crawln45/Flexget,jawilson/Flexget,Flexget/Flexget,crawln45/Flexget,malkavi/Flexget,JorisDeRieck/Flexget,poulpito/Flexget,gazpachoking/Flexget,sean797/Flexget,Flexget/Flexget,malkavi/Flexget,sean797/Flexget,LynxyssCZ/Flexget,OmgOhnoes/Flexget,poulpito/Flexget,tobinjt/Flexget,Danfocus/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,LynxyssCZ/Flexget,jawilson/Flexget,Flexget/Flexget,OmgOhnoes/Flexget,ianstalk/Flexget,Danfocus/Flexget,Danfocus/Flexget
--- +++ @@ -6,17 +6,22 @@ class TestInterfaces(object): """Test that any plugins declaring certain interfaces at least superficially comply with those interfaces.""" + def get_plugins(self, interface): + plugins = list(plugin.get_plugins(interface=interface)) + assert plugins, 'No plugins for this interface found.' + return plugins + def test_task_interface(self): - plugin.load_plugins() - task_plugins = plugin.get_plugins(interface='task') - for p in task_plugins: + for p in self.get_plugins('task'): assert isinstance(p.schema, dict), 'Task interface requires a schema to be defined.' assert p.phase_handlers, 'Task plugins should have at least on phase handler (on_task_X) method.' - def test_list_interface(self): - plugin.load_plugins() - task_plugins = plugin.get_plugins(interface='list') - for p in task_plugins: + for p in self.get_plugins('list'): assert isinstance(p.schema, dict), 'List interface requires a schema to be defined.' assert hasattr(p.instance, 'get_list'), 'List plugins must implement a get_list method.' + + def test_search_interface(self): + for p in self.get_plugins('search'): + assert isinstance(p.schema, dict), 'Search interface requires a schema to be defined.' + assert hasattr(p.instance, 'search'), 'Search plugins must implement a search method.'
11cf55cf2859a23edd2d1dba56e574d01cacce4f
apps/funding/forms.py
apps/funding/forms.py
from django import forms
from django.utils.translation import ugettext_lazy as _, ugettext as __

from .models import Funding
from .widgets import PerksAmountWidget

class FundingForm(forms.Form):
    required_css_class = 'required'

    amount = forms.DecimalField(label=_("Amount"), decimal_places=2,
        widget=PerksAmountWidget())
    name = forms.CharField(label=_("Name"), required=False,
        help_text=_("Optional name for public list of contributors"))
    email = forms.EmailField(label=_("Contact e-mail"),
        help_text=_("We'll use it to contact you about your perks and fundraiser status and payment updates.<br/> "
            "Won't be publicised."), required=False)

    def __init__(self, offer, *args, **kwargs):
        self.offer = offer
        super(FundingForm, self).__init__(*args, **kwargs)
        self.fields['amount'].widget.form_instance = self

    def clean_amount(self):
        if self.cleaned_data['amount'] <= 0:
            raise forms.ValidationError(__("Enter positive amount."))
        return self.cleaned_data['amount']

    def clean(self):
        if not self.offer.is_current():
            raise forms.ValidationError(__("This offer is out of date."))
        return self.cleaned_data

    def save(self):
        return Funding.objects.create(
            offer=self.offer,
            name=self.cleaned_data['name'],
            email=self.cleaned_data['email'],
            amount=self.cleaned_data['amount'],
        )
from django import forms
from django.utils.translation import ugettext_lazy as _, ugettext as __

from .models import Funding
from .widgets import PerksAmountWidget

class FundingForm(forms.Form):
    required_css_class = 'required'

    amount = forms.DecimalField(label=_("Amount"), decimal_places=2,
        widget=PerksAmountWidget())
    name = forms.CharField(label=_("Name"), required=False,
        help_text=_("Optional name for public list of contributors"))
    email = forms.EmailField(label=_("Contact e-mail"),
        help_text=_("We'll use it to contact you about your perks and fundraiser status and payment updates.<br/> "
            "Won't be publicised."), required=False)

    def __init__(self, offer, *args, **kwargs):
        self.offer = offer
        super(FundingForm, self).__init__(*args, **kwargs)
        self.fields['amount'].widget.form_instance = self

    def clean_amount(self):
        if self.cleaned_data['amount'] <= 0:
            raise forms.ValidationError(__("Enter positive amount."))
        return self.cleaned_data['amount']

    def clean(self):
        if not self.offer.is_current():
            raise forms.ValidationError(__("This offer is out of date."))
        return self.cleaned_data

    def save(self):
        funding = Funding.objects.create(
            offer=self.offer,
            name=self.cleaned_data['name'],
            email=self.cleaned_data['email'],
            amount=self.cleaned_data['amount'],
        )
        funding.perks = funding.offer.get_perks(funding.amount)
        return funding
Set perks on form save.
Set perks on form save.
Python
agpl-3.0
fnp/wolnelektury,fnp/wolnelektury,fnp/wolnelektury,fnp/wolnelektury
--- +++ @@ -31,10 +31,12 @@
         return self.cleaned_data
 
     def save(self):
-        return Funding.objects.create(
+        funding = Funding.objects.create(
             offer=self.offer,
             name=self.cleaned_data['name'],
             email=self.cleaned_data['email'],
             amount=self.cleaned_data['amount'],
         )
+        funding.perks = funding.offer.get_perks(funding.amount)
+        return funding
145a72dc8439d4479b64a79e7c94cbc12f4afdd7
test/__init__.py
test/__init__.py
"""Package for regression testing of the blender nif scripts."""

import bpy

def setup(self):
    """Enables the nif scripts addon, so all tests can use it."""
    bpy.ops.wm.addon_enable(module="io_scene_nif")

def teardown(self):
    """Disables the nif scripts addon."""
    bpy.ops.wm.addon_disable(module="io_scene_nif")
"""Package for regression testing of the blender nif scripts."""

import bpy

def setup(self):
    """Enables the nif scripts addon, so all tests can use it."""
    bpy.ops.wm.addon_enable(module="io_scene_nif")
    # remove default objects
    for obj in bpy.data.objects[:]:
        bpy.context.scene.objects.unlink(obj)
        bpy.data.objects.remove(obj)

def teardown(self):
    """Disables the nif scripts addon."""
    bpy.ops.wm.addon_disable(module="io_scene_nif")
Remove startup objects when setting up the test module.
Remove startup objects when setting up the test module.
Python
bsd-3-clause
amorilia/blender_nif_plugin,nightstrike/blender_nif_plugin,amorilia/blender_nif_plugin,nightstrike/blender_nif_plugin
--- +++ @@ -5,6 +5,10 @@
 def setup(self):
     """Enables the nif scripts addon, so all tests can use it."""
     bpy.ops.wm.addon_enable(module="io_scene_nif")
+    # remove default objects
+    for obj in bpy.data.objects[:]:
+        bpy.context.scene.objects.unlink(obj)
+        bpy.data.objects.remove(obj)
 
 def teardown(self):
     """Disables the nif scripts addon."""
5d6b384a2f1b8caa9421b428e5d81aaa1d9a82e1
tests/correct.py
tests/correct.py
"""Check ringtophat results against scipy.ndimage.convolve"""
"""Check ringtophat results against scipy.ndimage.convolve"""
import unittest
from numpy.testing import assert_equal, assert_almost_equal
import numpy as np
import ringtophat

class TestKernels(unittest.TestCase):
    def test_binary_disk(self):
        actual = ringtophat.binary_disk(1)
        desired = np.array([[False,  True, False],
                            [ True,  True,  True],
                            [False,  True, False]])
        assert_equal(actual, desired)
    def test_binary_ring(self):
        actual = ringtophat.binary_ring(1, 2)
        desired = np.array([[False, False,  True, False, False],
                            [False,  True,  True,  True, False],
                            [ True,  True, False,  True,  True],
                            [False,  True,  True,  True, False],
                            [False, False,  True, False, False]])
        assert_equal(actual, desired)

if __name__ == '__main__':
    unittest.main()
Add first tests. disk and ring masks for simple cases.
Add first tests. disk and ring masks for simple cases.
Python
bsd-3-clause
gammapy/ringtophat,gammapy/ringtophat
--- +++ @@ -1 +1,25 @@
 """Check ringtophat results against scipy.ndimage.convolve"""
+import unittest
+from numpy.testing import assert_equal, assert_almost_equal
+import numpy as np
+import ringtophat
+
+class TestKernels(unittest.TestCase):
+    def test_binary_disk(self):
+        actual = ringtophat.binary_disk(1)
+        desired = np.array([[False,  True, False],
+                            [ True,  True,  True],
+                            [False,  True, False]])
+        assert_equal(actual, desired)
+    def test_binary_ring(self):
+        actual = ringtophat.binary_ring(1, 2)
+        desired = np.array([[False, False,  True, False, False],
+                            [False,  True,  True,  True, False],
+                            [ True,  True, False,  True,  True],
+                            [False,  True,  True,  True, False],
+                            [False, False,  True, False, False]])
+        assert_equal(actual, desired)
+
+if __name__ == '__main__':
+    unittest.main()
+
2974972ce36f1cd2dec99a18edc49ac374cdf458
tools/fitsevt.py
tools/fitsevt.py
#! /usr/bin/python3

import sys
import os
import math
from astropy.io import fits

inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])


fnames = os.listdir(inputFolder)

for fname in fnames:
    print(fname)
    hdulist = fits.open(inputFolder+"/"+fname)

    for i in range(1,5):
        timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
        nBins = math.ceil(timeRange/binSize)
        count = [0]*nBins

        for event in hdulist[i].data:
            if(event["ENERGY"]>=eLo or event["ENERGY"]<=eHi):
                index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
                count[index] += 1

        sigClass = 1

        with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
            f.write("{0} {1}\n".format(nBins,sigClass))
            for j in range(nBins):
                f.write("{0}\n".format(count[j]))
#! /usr/bin/python3

import sys
import os
import math
from astropy.io import fits

inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])

fnames = os.listdir(inputFolder)

for fname in fnames:
    print(fname)
    hdulist = fits.open(inputFolder+"/"+fname)

    for i in range(1,5):
        timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
        nBins = math.ceil(timeRange/binSize)
        count = [0]*nBins

        for event in hdulist[i].data:
            if(event["ENERGY"]>=eLo or event["ENERGY"]<=eHi):
                index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
                count[index] += 1

        sigClass = 1

        outputFile = outputFolder+"/"+os.path.splitext(fname)[0]+"_Q{0}.txt".format(i)
        with open(outputFile,'w') as f:
            f.write("{0} {1}\n".format(nBins,sigClass))
            for j in range(nBins):
                f.write("{0}\n".format(count[j]))
Change output filenaming to have .txt
Change output filenaming to have .txt
Python
mit
fauzanzaid/IUCAA-GRB-detection-Feature-extraction
--- +++ @@ -10,7 +10,6 @@
 eLo = int(sys.argv[3])
 eHi = int(sys.argv[4])
 binSize = int(sys.argv[5])
-
 
 fnames = os.listdir(inputFolder)
 
@@ -30,7 +29,8 @@
 
         sigClass = 1
 
-        with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
+        outputFile = outputFolder+"/"+os.path.splitext(fname)[0]+"_Q{0}.txt".format(i)
+        with open(outputFile,'w') as f:
             f.write("{0} {1}\n".format(nBins,sigClass))
             for j in range(nBins):
                 f.write("{0}\n".format(count[j]))
96871ef8de84653406749a2e503ef4b4fb800b2f
src/mist/io/tests/features/steps/shell.py
src/mist/io/tests/features/steps/shell.py
"""
@given:
-------

@when:
------
I type the "{command}" shell command --> shell_command

@then:
------
I should see the "{command}" result in shell output --> shell_output
------
"""


@when(u'I type the "{command}" shell command')
def shell_command(context, command):
    shell_input = context.browser.find_by_css('#shell-input input')
    shell_input.type(command)

    shell_enter = context.browser.find_by_css('#shell-submit .ui-btn')
    shell_enter.click()


@then(u'I should see the "{command}" result in shell output')
def shell_output(context, command):
    shell_output = context.browser.find_by_css('#shell-response .ui-btn')

    for output in shell_output:
        if command in output.text:
            return

    assert False, u'Could not find the output of %s command' % command
"""
@given:
-------

@when:
------
I type the "{command}" shell command --> shell_command

@then:
------
I should see the "{command}" result in shell output --> shell_output
------
"""


@when(u'I type the "{command}" shell command')
def shell_command(context, command):
    shell_input = context.browser.find_by_css('#shell-input input')
    shell_input.type(command)

    shell_enter = context.browser.find_by_css('#shell-submit .ui-btn')
    shell_enter.click()


@then(u'I should see the "{command}" result in shell output')
def shell_output(context, command):
    shell_output = context.browser.find_by_css('#shell-return h3')

    for output in shell_output:
        if command in output.text:
            return

    assert False, u'Could not find the output of %s command' % command
Fix Shell tests according to css changes
Fix Shell tests according to css changes
Python
agpl-3.0
kelonye/mist.io,Lao-liu/mist.io,DimensionDataCBUSydney/mist.io,Lao-liu/mist.io,johnnyWalnut/mist.io,DimensionDataCBUSydney/mist.io,zBMNForks/mist.io,munkiat/mist.io,Lao-liu/mist.io,afivos/mist.io,munkiat/mist.io,munkiat/mist.io,afivos/mist.io,johnnyWalnut/mist.io,Lao-liu/mist.io,munkiat/mist.io,DimensionDataCBUSydney/mist.io,DimensionDataCBUSydney/mist.io,zBMNForks/mist.io,zBMNForks/mist.io,johnnyWalnut/mist.io,afivos/mist.io,kelonye/mist.io,kelonye/mist.io
--- +++ @@ -26,7 +26,7 @@
 
 @then(u'I should see the "{command}" result in shell output')
 def shell_output(context, command):
-    shell_output = context.browser.find_by_css('#shell-response .ui-btn')
+    shell_output = context.browser.find_by_css('#shell-return h3')
 
     for output in shell_output:
         if command in output.text:
3c8f7752a6b47a8ce5395b9038a9280fe0369aae
risk_engine_tests.py
risk_engine_tests.py
from risk_engine import RiskEngine
import unittest


class RiskEngineTestCase(unittest.TestCase):
    def setUp(self):
        self.engine = RiskEngine()

    def test_roll_is_near_even(self):
        n = 1000000
        payout_count = 0
        for roll in range(n):
            if self.engine.run():
                payout_count += 1
        payout_percentage = float(payout_count)/float(n)
        print(payout_percentage)
        assert (payout_percentage > 0.0 and payout_percentage <= 0.49)

if __name__ == '__main__':
    unittest.main()
from risk_engine import RiskEngine
import unittest


class RiskEngineTestCase(unittest.TestCase):
    def setUp(self):
        self.engine = RiskEngine()

    def test_roll_is_near_even(self):
        n = 1000000
        payout_count = 0
        for roll in range(n):
            if self.engine.run():
                payout_count += 1
        payout_percentage = float(payout_count)/float(n)
        print(payout_percentage)
        assert (payout_percentage > 0.0 and payout_percentage <= 0.499)

if __name__ == '__main__':
    unittest.main()
Improve the users odds a little
Improve the users odds a little
Python
mit
joelklabo/risk
--- +++ @@ -14,7 +14,7 @@
                 payout_count += 1
         payout_percentage = float(payout_count)/float(n)
         print(payout_percentage)
-        assert (payout_percentage > 0.0 and payout_percentage <= 0.49)
+        assert (payout_percentage > 0.0 and payout_percentage <= 0.499)
 
 if __name__ == '__main__':
     unittest.main()
c3ca44b17b9e14e8570083ab49be4da8e64757bc
scripts/filter_fasta_on_length.py
scripts/filter_fasta_on_length.py
#!/usr/bin/env python
"""filter_fasta_on_length.py

Filter a fasta file so that the resulting sequences are all longer or equal to the length threshold parameter.

Only accepts stdin."""
import argparse
import sys

from Bio import SeqIO

def main(args):
    seqs = []
    for i, seq in enumerate(SeqIO.parse(sys.stdin, "fasta")):
        if len(seq) >= args.length_threshold:
            seqs.append(seq)
        if i % 1000 == 0 and len(seqs):
            SeqIO.write(seqs, sys.stdout, 'fasta')
            seqs = []

    if len(seqs):
        SeqIO.write(seqs, sys.stdout, 'fasta')

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-l", "--length_threshold", type=int, help="Length trheshold to filter on.")

    args = parser.parse_args()

    main(args)
#!/usr/bin/env python
"""filter_fasta_on_length.py

Filter a fasta file so that the resulting sequences are all longer or equal to the length threshold parameter.

Only accepts stdin."""
import argparse
import sys

from Bio import SeqIO

def main(args):
    seqs = []
    if args.input_fasta is not None:
        input_handle = open(args.input_fasta, 'r')
    else:
        input_handle = sys.stdin

    for i, seq in enumerate(SeqIO.parse(input_handle, "fasta")):
        if len(seq) >= args.length_threshold:
            seqs.append(seq)
        if i % 1000 == 0 and len(seqs):
            SeqIO.write(seqs, sys.stdout, 'fasta')
            seqs = []

    if len(seqs):
        SeqIO.write(seqs, sys.stdout, 'fasta')

    input_handle.close()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--input_fasta", help="Input file")
    parser.add_argument("-l", "--length_threshold", type=int, help="Length threshold to filter on.")

    args = parser.parse_args()

    main(args)
Use either file input or stdin for filtering length
Use either file input or stdin for filtering length
Python
mit
EnvGen/toolbox,EnvGen/toolbox
--- +++ @@ -10,7 +10,12 @@
 
 def main(args):
     seqs = []
-    for i, seq in enumerate(SeqIO.parse(sys.stdin, "fasta")):
+    if args.input_fasta is not None:
+        input_handle = open(args.input_fasta, 'r')
+    else:
+        input_handle = sys.stdin
+
+    for i, seq in enumerate(SeqIO.parse(input_handle, "fasta")):
         if len(seq) >= args.length_threshold:
             seqs.append(seq)
         if i % 1000 == 0 and len(seqs):
@@ -20,9 +25,12 @@
     if len(seqs):
         SeqIO.write(seqs, sys.stdout, 'fasta')
 
+    input_handle.close()
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument("-l", "--length_threshold", type=int, help="Length trheshold to filter on.")
+    parser.add_argument("--input_fasta", help="Input file")
+    parser.add_argument("-l", "--length_threshold", type=int, help="Length threshold to filter on.")
 
     args = parser.parse_args()
91c502beb68acff1ad5194a572d5b75f607b7d00
ckanext/qa/helpers.py
ckanext/qa/helpers.py
from ckan.plugins import toolkit as tk


def qa_openness_stars_resource_html(resource):
    qa = resource.get('qa')
    if not qa:
        return '<!-- No qa info for this resource -->'
    extra_vars = qa
    return tk.literal(
        tk.render('qa/openness_stars.html',
                  extra_vars=extra_vars))


def qa_openness_stars_dataset_html(dataset):
    qa = dataset.get('qa')
    if not qa:
        return '<!-- No qa info for this dataset -->'
    extra_vars = qa
    return tk.literal(
        tk.render('qa/openness_stars_brief.html',
                  extra_vars=extra_vars))
import copy
from ckan.plugins import toolkit as tk


def qa_openness_stars_resource_html(resource):
    qa = resource.get('qa')
    if not qa:
        return '<!-- No qa info for this resource -->'
    # Take a copy of the qa dict, because weirdly the renderer appears to add
    # keys to it like _ and app_globals. This is bad because when it comes to
    # render the debug in the footer those extra keys take about 30s to render,
    # for some reason.
    extra_vars = copy.deepcopy(qa)
    return tk.literal(
        tk.render('qa/openness_stars.html',
                  extra_vars=extra_vars))


def qa_openness_stars_dataset_html(dataset):
    qa = dataset.get('qa')
    if not qa:
        return '<!-- No qa info for this dataset -->'
    extra_vars = copy.deepcopy(qa)
    return tk.literal(
        tk.render('qa/openness_stars_brief.html',
                  extra_vars=extra_vars))
Fix bug that meant rendering debug info was v slow.
Fix bug that meant rendering debug info was v slow.
Python
mit
ckan/ckanext-qa,ckan/ckanext-qa,ckan/ckanext-qa
--- +++ @@ -1,3 +1,4 @@
+import copy
 from ckan.plugins import toolkit as tk
 
 
@@ -5,7 +6,11 @@
     qa = resource.get('qa')
     if not qa:
         return '<!-- No qa info for this resource -->'
-    extra_vars = qa
+    # Take a copy of the qa dict, because weirdly the renderer appears to add
+    # keys to it like _ and app_globals. This is bad because when it comes to
+    # render the debug in the footer those extra keys take about 30s to render,
+    # for some reason.
+    extra_vars = copy.deepcopy(qa)
     return tk.literal(
         tk.render('qa/openness_stars.html',
                   extra_vars=extra_vars))
@@ -15,7 +20,7 @@
     qa = dataset.get('qa')
     if not qa:
         return '<!-- No qa info for this dataset -->'
-    extra_vars = qa
+    extra_vars = copy.deepcopy(qa)
     return tk.literal(
         tk.render('qa/openness_stars_brief.html',
                   extra_vars=extra_vars))
e57d8f885c2be591bcfa7b5f337495cdb2e6ce64
ooni/tests/test_errors.py
ooni/tests/test_errors.py
from twisted.trial import unittest

import ooni.errors


class TestErrors(unittest.TestCase):
    def test_catch_child_failures_before_parent_failures(self):
        """
        Verify that more specific Failures are caught first by
        handleAllFailures() and failureToString().

        Fails if a subclass is listed after it's parent Failure.
        """

        # Check each Failure against all subsequent failures
        for index, (failure, _) in enumerate(ooni.errors.known_failures):
            for sub_failure, _ in ooni.errors.known_failures[index+1:]:

                # Fail if subsequent Failure inherits from the current Failure
                self.assertNotIsInstance(sub_failure(None), failure)
from twisted.trial import unittest

import ooni.errors


class TestErrors(unittest.TestCase):
    def test_catch_child_failures_before_parent_failures(self):
        """
        Verify that more specific Failures are caught first by
        handleAllFailures() and failureToString().

        Fails if a subclass is listed after its parent Failure.
        """

        # Check each Failure against all subsequent failures
        for index, (failure, _) in enumerate(ooni.errors.known_failures):
            for sub_failure, _ in ooni.errors.known_failures[index+1:]:

                # Fail if subsequent Failure inherits from the current Failure
                self.assertNotIsInstance(sub_failure(None), failure)
Fix typo in docstring spotted by @armadev
Fix typo in docstring spotted by @armadev
Python
bsd-2-clause
0xPoly/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe
--- +++ @@ -9,7 +9,7 @@
         Verify that more specific Failures are caught first by
         handleAllFailures() and failureToString().
 
-        Fails if a subclass is listed after it's parent Failure.
+        Fails if a subclass is listed after its parent Failure.
         """
 
         # Check each Failure against all subsequent failures
3d97a2ca6c4c285d59e3c823fbee94a494e85ba0
app/tests/tests.py
app/tests/tests.py
import unittest

from app import serve


class PageCase(unittest.TestCase):
    def setUp(self):
        serve.app.config['TESTING'] = True
        self.app = serve.app.test_client()

    def test_index_load(self):
        self.page_test('/', b'')

    def test_robots_load(self):
        self.page_test('/robots.txt', b'')

    def test_sitemap_load(self):
        self.page_test('/sitemap.xml', b'')

    def test_not_found(self):
        response = self.app.get('/asdf')
        self.assertEqual(response.status_code, 404)
        self.assertIn(b'Not Found', response.get_data())

    def page_test(self, path, string):
        response = self.app.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertIn(string, response.get_data())
import unittest

from varsnap import TestVarSnap  # noqa: F401

from app import serve


class PageCase(unittest.TestCase):
    def setUp(self):
        serve.app.config['TESTING'] = True
        self.app = serve.app.test_client()

    def test_index_load(self):
        self.page_test('/', b'')

    def test_robots_load(self):
        self.page_test('/robots.txt', b'')

    def test_sitemap_load(self):
        self.page_test('/sitemap.xml', b'')

    def test_not_found(self):
        response = self.app.get('/asdf')
        self.assertEqual(response.status_code, 404)
        self.assertIn(b'Not Found', response.get_data())

    def page_test(self, path, string):
        response = self.app.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertIn(string, response.get_data())
Add TestVarSnap as a TestCase
Add TestVarSnap as a TestCase
Python
mit
albertyw/base-flask,albertyw/base-flask,albertyw/base-flask,albertyw/base-flask
--- +++ @@ -1,4 +1,6 @@
 import unittest
+
+from varsnap import TestVarSnap  # noqa: F401
 
 from app import serve
 
1975d5391f058f85272def4435b243440b72bff6
weather/admin.py
weather/admin.py
from django.contrib.admin import ModelAdmin, register
from django.contrib.gis.admin import GeoModelAdmin
from weather.models import WeatherStation, Location


@register(Location)
class LocationAdmin(GeoModelAdmin):
    openlayers_url = '//static.dpaw.wa.gov.au/static/libs/openlayers/2.13.1/OpenLayers.js'
    list_display = ('pk', 'title', 'point', 'height')


@register(WeatherStation)
class WeatherStationAdmin(ModelAdmin):
    list_display = (
        'name', 'manufacturer', 'abbreviation', 'bom_abbreviation',
        'ip_address', 'last_reading', 'connect_every', 'active', 'upload_data')
    list_filter = ('manufacturer', 'active', 'upload_data')
from django.contrib.admin import ModelAdmin, register
from django.contrib.gis.admin import GeoModelAdmin
from weather.models import WeatherStation, Location


@register(Location)
class LocationAdmin(GeoModelAdmin):
    list_display = ('pk', 'title', 'point', 'height')


@register(WeatherStation)
class WeatherStationAdmin(ModelAdmin):
    list_display = (
        'name', 'manufacturer', 'abbreviation', 'bom_abbreviation',
        'ip_address', 'last_reading', 'connect_every', 'active', 'upload_data')
    list_filter = ('manufacturer', 'active', 'upload_data')
Remove custom OpenLayers.js from LocationAdmin.
Remove custom OpenLayers.js from LocationAdmin.
Python
bsd-3-clause
parksandwildlife/resource_tracking,parksandwildlife/resource_tracking,parksandwildlife/resource_tracking,ropable/resource_tracking,ropable/resource_tracking,ropable/resource_tracking
--- +++ @@ -5,7 +5,6 @@
 
 @register(Location)
 class LocationAdmin(GeoModelAdmin):
-    openlayers_url = '//static.dpaw.wa.gov.au/static/libs/openlayers/2.13.1/OpenLayers.js'
     list_display = ('pk', 'title', 'point', 'height')
 
 
b2d654cf2af71b608d81c6501b214a9b330e1ffe
battlenet/utils.py
battlenet/utils.py
import unicodedata
import urllib


def normalize(name):
    if not isinstance(name, unicode):
        name = name.decode('utf-8')

    return unicodedata.normalize('NFKC', name.replace("'", '')).encode('utf-8')


def quote(name):
    if isinstance(name, unicode):
        name = normalize(name).encode('utf8')

    return urllib.quote(name)


def make_icon_url(region, icon, size='large'):
    if not icon:
        return ''

    if size == 'small':
        size = 18
    else:
        size = 56

    return 'http://%s.media.blizzard.com/wow/icons/%d/%s.jpg' % (region, size, icon)


def make_connection():
    if not hasattr(make_connection, 'Connection'):
        from .connection import Connection
        make_connection.Connection = Connection

    return make_connection.Connection()
import unicodedata
import urllib


def normalize(name):
    if not isinstance(name, unicode):
        name = name.decode('utf-8')

    return unicodedata.normalize('NFKC', name.replace("'", '')).encode('utf-8')


def quote(name):
    if isinstance(name, unicode):
        name = normalize(name)

    return urllib.quote(name)


def make_icon_url(region, icon, size='large'):
    if not icon:
        return ''

    if size == 'small':
        size = 18
    else:
        size = 56

    return 'http://%s.media.blizzard.com/wow/icons/%d/%s.jpg' % (region, size, icon)


def make_connection():
    if not hasattr(make_connection, 'Connection'):
        from .connection import Connection
        make_connection.Connection = Connection

    return make_connection.Connection()
Normalize already returns encoded value.
Normalize already returns encoded value.
Python
mit
PuckCh/battlenet,vishnevskiy/battlenet
--- +++ @@ -11,7 +11,7 @@
 
 def quote(name):
     if isinstance(name, unicode):
-        name = normalize(name).encode('utf8')
+        name = normalize(name)
 
     return urllib.quote(name)
 
99b1edfcc317dbb71b8dad9ad501aaa21f8044f9
zorp/__init__.py
zorp/__init__.py
"""
Zorp
"""

from zorp.client import Client
from zorp.decorator import remote_method
from zorp.server import Server
"""
Zorp
"""

from zorp.client import Client, TriesExceededException
from zorp.decorator import remote_method
from zorp.server import Server
Add TriesExceededException to the base package
Add TriesExceededException to the base package
Python
mit
proxama/zorp
--- +++ @@ -2,6 +2,6 @@
 Zorp
 """
 
-from zorp.client import Client
+from zorp.client import Client, TriesExceededException
 from zorp.decorator import remote_method
 from zorp.server import Server
cec7922ad7636f62be864d115f8e341ac511bbc9
numba/tests/foreign_call/test_cffi_call.py
numba/tests/foreign_call/test_cffi_call.py
import os
import ctypes
import doctest

from numba import *
import numba

try:
    import cffi
    ffi = cffi.FFI()
except ImportError:
    ffi = None

# ______________________________________________________________________

def test():
    if ffi is not None:
        test_cffi_calls()

# ______________________________________________________________________
# Tests

@autojit(nopython=True)
def call_cffi_func(func, value):
    return func(value)

def test_cffi_calls():
    # Test printf for nopython and no segfault
    ffi.cdef("int printf(char *, ...);")
    lib = ffi.dlopen(None)
    printf = lib.printf
    call_cffi_func(printf, "Hello world!\n")

# ______________________________________________________________________

if __name__ == "__main__":
    test()
import os
import ctypes
import doctest

from numba import *
import numba

try:
    import cffi
    ffi = cffi.FFI()
except ImportError:
    ffi = None

# ______________________________________________________________________

def test():
    if ffi is not None:
        test_cffi_calls()

# ______________________________________________________________________
# Tests

@autojit(nopython=True)
def call_cffi_func(func, value):
    return func(value)

def test_cffi_calls():
    # Test printf for nopython and no segfault
    ffi.cdef("int printf(char *, ...);", override=True)
    lib = ffi.dlopen(None)
    printf = lib.printf
    call_cffi_func(printf, "Hello world!\n")

# ______________________________________________________________________

if __name__ == "__main__":
    test()
Fix CFFI test when executed multiple times
Fix CFFI test when executed multiple times
Python
bsd-2-clause
stefanseefeld/numba,jriehl/numba,IntelLabs/numba,stonebig/numba,gmarkall/numba,numba/numba,GaZ3ll3/numba,stefanseefeld/numba,numba/numba,IntelLabs/numba,cpcloud/numba,sklam/numba,cpcloud/numba,IntelLabs/numba,shiquanwang/numba,stuartarchibald/numba,sklam/numba,pitrou/numba,gdementen/numba,gmarkall/numba,seibert/numba,ssarangi/numba,gmarkall/numba,ssarangi/numba,ssarangi/numba,numba/numba,pitrou/numba,jriehl/numba,stefanseefeld/numba,stonebig/numba,jriehl/numba,gdementen/numba,cpcloud/numba,sklam/numba,shiquanwang/numba,pombredanne/numba,pombredanne/numba,pombredanne/numba,gmarkall/numba,stuartarchibald/numba,pombredanne/numba,cpcloud/numba,stuartarchibald/numba,gdementen/numba,GaZ3ll3/numba,seibert/numba,gdementen/numba,seibert/numba,numba/numba,stefanseefeld/numba,ssarangi/numba,stuartarchibald/numba,IntelLabs/numba,cpcloud/numba,gmarkall/numba,gdementen/numba,stonebig/numba,stonebig/numba,stuartarchibald/numba,GaZ3ll3/numba,sklam/numba,seibert/numba,jriehl/numba,seibert/numba,GaZ3ll3/numba,jriehl/numba,pitrou/numba,shiquanwang/numba,GaZ3ll3/numba,sklam/numba,stefanseefeld/numba,IntelLabs/numba,numba/numba,pombredanne/numba,pitrou/numba,pitrou/numba,stonebig/numba,ssarangi/numba
--- +++ @@ -26,7 +26,7 @@
 
 def test_cffi_calls():
     # Test printf for nopython and no segfault
-    ffi.cdef("int printf(char *, ...);")
+    ffi.cdef("int printf(char *, ...);", override=True)
     lib = ffi.dlopen(None)
     printf = lib.printf
     call_cffi_func(printf, "Hello world!\n")
6e2dae94239252f6b0338e609a838fa31e417842
checks.d/veneur.py
checks.d/veneur.py
import datetime
from urlparse import urljoin

import requests

# project
from checks import AgentCheck

class Veneur(AgentCheck):
    VERSION_METRIC_NAME = 'veneur.deployed_version'
    BUILDAGE_METRIC_NAME = 'veneur.build_age'
    ERROR_METRIC_NAME = 'veneur.agent_check.errors_total'

    def check(self, instance):
        success = 0

        host = instance['host']

        try:
            r = requests.get(urljoin(host, '/version'))
            sha = r.text
            success = 1

            r = requests.get(urljoin(host, '/builddate'))
            builddate = datetime.datetime.fromtimestamp(int(r.text))
            tdelta = datetime.datetime.now() - builddate
            self.histogram(self.BUILDAGE_METRIC_NAME, tdelta.total_seconds())
        except:
            success = 0
            self.increment(self.ERROR_METRIC_NAME)
            raise
        finally:
            tags = instance.get('tags', [])
            tags.extend(['sha:{0}'.format(sha)])
            self.gauge(self.VERSION_METRIC_NAME, success, tags = tags)
import datetime
from urlparse import urljoin

import requests

# project
from checks import AgentCheck

class Veneur(AgentCheck):
    VERSION_METRIC_NAME = 'veneur.deployed_version'
    BUILDAGE_METRIC_NAME = 'veneur.build_age'
    ERROR_METRIC_NAME = 'veneur.agent_check.errors_total'

    def check(self, instance):
        success = 0

        host = instance['host']
        tags = instance.get('tags', [])

        try:
            r = requests.get(urljoin(host, '/version'))
            tags.extend(['sha:{0}'.format(r.text)])
            success = 1

            r = requests.get(urljoin(host, '/builddate'))
            builddate = datetime.datetime.fromtimestamp(int(r.text))
            tdelta = datetime.datetime.now() - builddate
            self.histogram(self.BUILDAGE_METRIC_NAME, tdelta.total_seconds())
        except:
            success = 0
            self.increment(self.ERROR_METRIC_NAME)
            raise
        finally:
            self.gauge(self.VERSION_METRIC_NAME, success, tags = tags)
Add the sha to the tags in a way that won't cause an error
Add the sha to the tags in a way that won't cause an error
Python
mit
stripe/stripe-datadog-checks,stripe/datadog-checks
--- +++ @@ -15,10 +15,11 @@
         success = 0
 
         host = instance['host']
+        tags = instance.get('tags', [])
 
         try:
             r = requests.get(urljoin(host, '/version'))
-            sha = r.text
+            tags.extend(['sha:{0}'.format(r.text)])
             success = 1
 
             r = requests.get(urljoin(host, '/builddate'))
@@ -32,6 +33,4 @@
             self.increment(self.ERROR_METRIC_NAME)
             raise
         finally:
-            tags = instance.get('tags', [])
-            tags.extend(['sha:{0}'.format(sha)])
             self.gauge(self.VERSION_METRIC_NAME, success, tags = tags)
cc00cc1c2539eb7dbeed2656e1929c8c53c4dd98
pyverdict/pyverdict/datatype_converters/impala_converter.py
pyverdict/pyverdict/datatype_converters/impala_converter.py
from .converter_base import DatatypeConverterBase
import dateutil


def _str_to_datetime(java_obj, idx):
    return dateutil.parser.parse(java_obj.getString(idx))

_typename_to_converter_fxn = {'timestamp': _str_to_datetime}


class ImpalaConverter(DatatypeConverterBase):
    @staticmethod
    def read_value(result_set, index, col_typename):
        if col_typename in _typename_to_converter_fxn:
            if result_set.getString(index) is None:
                return None
            return _typename_to_converter_fxn[col_typename](result_set, index)
        else:
            return result_set.getValue(index)
from .converter_base import DatatypeConverterBase
import dateutil


def _str_to_datetime(java_obj, idx):
    return dateutil.parser.parse(java_obj.getString(idx))

_typename_to_converter_fxn = {'timestamp': _str_to_datetime}


class ImpalaConverter(DatatypeConverterBase):
    '''
    Type conversion rule:

    BIGINT => int,
    BOOLEAN => bool,
    CHAR => str,
    DECIMAL => decimal.Decimal,
    DOUBLE => float,
    FLOAT => float,
    REAL => float,
    SMALLINT => int,
    STRING => str,
    TIMESTAMP => datetime.datetime,
    TINYINT => int,
    VARCHAR => str

    '''

    @staticmethod
    def read_value(result_set, index, col_typename):
        if col_typename in _typename_to_converter_fxn:
            if result_set.getString(index) is None:
                return None
            return _typename_to_converter_fxn[col_typename](result_set, index)
        else:
            return result_set.getValue(index)
Add type conversion rule comment
Add type conversion rule comment
Python
apache-2.0
mozafari/verdict,mozafari/verdict,mozafari/verdict,mozafari/verdict,mozafari/verdict
--- +++ @@ -9,6 +9,24 @@
 
 
 class ImpalaConverter(DatatypeConverterBase):
+    '''
+    Type conversion rule:
+
+    BIGINT => int,
+    BOOLEAN => bool,
+    CHAR => str,
+    DECIMAL => decimal.Decimal,
+    DOUBLE => float,
+    FLOAT => float,
+    REAL => float,
+    SMALLINT => int,
+    STRING => str,
+    TIMESTAMP => datetime.datetime,
+    TINYINT => int,
+    VARCHAR => str
+
+    '''
+
     @staticmethod
     def read_value(result_set, index, col_typename):
         if col_typename in _typename_to_converter_fxn:
f5f4397d026678570ad271e84099d2bcec541c72
whacked4/whacked4/ui/dialogs/startdialog.py
whacked4/whacked4/ui/dialogs/startdialog.py
#!/usr/bin/env python
#coding=utf8

from whacked4 import config
from whacked4.ui import windows


class StartDialog(windows.StartDialogBase):
    """
    This dialog is meant to be displayed on startup of the application.

    It allows the user to quickly access some common functions without having to dig down into a menu first.
    """

    def __init__(self, parent):
        windows.StartDialogBase.__init__(self, parent)
        client_width = self.FileList.GetClientSizeTuple()[0] - 4
        self.FileList.InsertColumn(0, 'Filename', width=client_width)

        # Populate the list of recently accessed Dehacked patches.
        recent_files = config.settings['recent_files']
        for index, filename in enumerate(recent_files):
            self.FileList.InsertStringItem(index, filename)

    def open_file_list(self, event):
        """
        Opens a Dehacked patch directly from the file list.
        """

        self.Hide()
        filename = config.settings['recent_files'][event.GetIndex()]
        self.GetParent().open_file(filename)

    def new_file(self, event):
        self.Hide()
        self.GetParent().new_file()

    def open_file(self, event):
        self.Hide()
        self.GetParent().open_file_dialog()

    def cancel(self, event):
        self.Hide()
#!/usr/bin/env python
#coding=utf8

from whacked4 import config
from whacked4.ui import windows


class StartDialog(windows.StartDialogBase):
    """
    This dialog is meant to be displayed on startup of the application.

    It allows the user to quickly access some common functions without having to dig down into a menu first.
    """

    def __init__(self, parent):
        windows.StartDialogBase.__init__(self, parent)
        client_width = self.FileList.GetClientSizeTuple()[0] - 4
        self.FileList.InsertColumn(0, 'Filename', width=client_width)

        # Populate the list of recently accessed Dehacked patches.
        config.settings.recent_files_clean()
        recent_files = config.settings['recent_files']
        for index, filename in enumerate(recent_files):
            self.FileList.InsertStringItem(index, filename)

    def open_file_list(self, event):
        """
        Opens a Dehacked patch directly from the file list.
        """

        self.Hide()
        filename = config.settings['recent_files'][event.GetIndex()]
        self.GetParent().open_file(filename)

    def new_file(self, event):
        self.Hide()
        self.GetParent().new_file()

    def open_file(self, event):
        self.Hide()
        self.GetParent().open_file_dialog()

    def cancel(self, event):
        self.Hide()
Clean the recent files list before displaying it in the startup dialog.
Clean the recent files list before displaying it in the startup dialog.
Python
bsd-2-clause
GitExl/WhackEd4,GitExl/WhackEd4
--- +++ @@ -18,6 +18,7 @@
         self.FileList.InsertColumn(0, 'Filename', width=client_width)
 
         # Populate the list of recently accessed Dehacked patches.
+        config.settings.recent_files_clean()
         recent_files = config.settings['recent_files']
         for index, filename in enumerate(recent_files):
             self.FileList.InsertStringItem(index, filename)
84b27afce57232bc0c8170eaad1beb26fd96eef0
tools/gyp/find_mac_gcc_version.py
tools/gyp/find_mac_gcc_version.py
#!/usr/bin/env python
# Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.

import re
import subprocess
import sys

def main():
  job = subprocess.Popen(['xcodebuild', '-version'],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  stdout, stderr = job.communicate()
  if job.returncode != 0:
    print >>sys.stderr, stdout
    print >>sys.stderr, stderr
    raise Exception('Error %d running xcodebuild!' % job.returncode)

  matches = re.findall('^Xcode (\d+)\.(\d+)(\.(\d+))?$', stdout, re.MULTILINE)
  if len(matches) > 0:
    major = int(matches[0][0])
    minor = int(matches[0][1])

    if major >= 4:
      return 'com.apple.compilers.llvmgcc42'
    elif major == 3 and minor >= 1:
      return '4.2'
    else:
      raise Exception('Unknown XCode Version "%s"' % version_match)
  else:
    raise Exception('Could not parse output of xcodebuild "%s"' % stdout)

if __name__ == '__main__':
  if sys.platform != 'darwin':
    raise Exception("This script only runs on Mac")
  print main()
#!/usr/bin/env python
# Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.

import re
import subprocess
import sys

def main():
  job = subprocess.Popen(['xcodebuild', '-version'],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  stdout, stderr = job.communicate()
  if job.returncode != 0:
    print >>sys.stderr, stdout
    print >>sys.stderr, stderr
    raise Exception('Error %d running xcodebuild!' % job.returncode)

  matches = re.findall('^Xcode (\d+)\.(\d+)(\.(\d+))?$', stdout, re.MULTILINE)
  if len(matches) > 0:
    major = int(matches[0][0])
    minor = int(matches[0][1])

    if major == 3 and minor >= 1:
      return '4.2'
    elif major == 4 and minor < 5:
      return 'com.apple.compilers.llvmgcc42'
    elif major == 4 and minor >= 5:
      # XCode seems to select the specific clang version automatically
      return 'com.apple.compilers.llvm.clang.1_0'
    else:
      raise Exception('Unknown XCode Version "%s"' % version_match)
  else:
    raise Exception('Could not parse output of xcodebuild "%s"' % stdout)

if __name__ == '__main__':
  if sys.platform != 'darwin':
    raise Exception("This script only runs on Mac")
  print main()
Use clang on mac if XCode >= 4.5
Use clang on mac if XCode >= 4.5

Review URL: https://codereview.chromium.org//14333010

git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@21950 260f80e4-7a28-3924-810f-c04153c831b5
Python
bsd-3-clause
dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-lang/sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk
--- +++ @@ -21,10 +21,13 @@
     major = int(matches[0][0])
     minor = int(matches[0][1])
 
-    if major >= 4:
+    if major == 3 and minor >= 1:
+      return '4.2'
+    elif major == 4 and minor < 5:
       return 'com.apple.compilers.llvmgcc42'
-    elif major == 3 and minor >= 1:
-      return '4.2'
+    elif major == 4 and minor >= 5:
+      # XCode seems to select the specific clang version automatically
+      return 'com.apple.compilers.llvm.clang.1_0'
     else:
       raise Exception('Unknown XCode Version "%s"' % version_match)
   else:
16742262b6a37f34bf83b3b6d6bcfd72e69276b2
imagersite/imager_profile/models.py
imagersite/imager_profile/models.py
import six
from django.db import models
from django.contrib.auth.models import User


@six.python_2_unicode_compatible
class ImagerProfile(models.Model):
    user = models.OneToOneField(User)
    fav_camera = models.CharField(max_length=30)
    address = models.CharField(max_length=100)
    web_url = models.URLField()
    type_photography = models.CharField(max_length=30)

    def __str__(self):
        return "{}'s profile".format(self.user.username)
import six
from django.db import models
from django.contrib.auth.models import ActiveProfileManager, User


@six.python_2_unicode_compatible
class ImagerProfile(models.Model):
    user = models.OneToOneField(
        User,
        nullable=False
    )
    fav_camera = models.CharField(
        max_length=30
    )
    address = models.CharField()
    web_url = models.URLField()
    type_photography = models.CharField(max_length=30)

    objects = models.Manager()
    active = ActiveProfileManager()

    def __str__(self):
        return "{}'s profile".format(self.user.username)

    def is_active(self):
        return self.user.is_active
Make sure a user has profile
Make sure a user has profile
Python
mit
jesseklein406/django-imager,jesseklein406/django-imager,jesseklein406/django-imager
--- +++ @@ -1,16 +1,27 @@
 import six
 from django.db import models
-from django.contrib.auth.models import User
+from django.contrib.auth.models import ActiveProfileManager, User
 
 
 @six.python_2_unicode_compatible
 class ImagerProfile(models.Model):
-    user = models.OneToOneField(User)
-    fav_camera = models.CharField(max_length=30)
-    address = models.CharField(max_length=100)
+    user = models.OneToOneField(
+        User,
+        nullable=False
+    )
+    fav_camera = models.CharField(
+        max_length=30
+    )
+    address = models.CharField()
     web_url = models.URLField()
     type_photography = models.CharField(max_length=30)
 
+    objects = models.Manager()
+    active = ActiveProfileManager()
+
     def __str__(self):
         return "{}'s profile".format(self.user.username)
+
+    def is_active(self):
+        return self.user.is_active
1a089ec6f34ebf81b4437b6f541ee2b9f4b85966
osf/migrations/0145_pagecounter_data.py
osf/migrations/0145_pagecounter_data.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-12 17:18
from __future__ import unicode_literals

from django.db import migrations, connection


def reverse_func(state, schema):
    with connection.cursor() as cursor:
        cursor.execute(
            """
            UPDATE osf_pagecounter
            SET (action, guid_id, file_id, version) = ('download', NULL, NULL, NULL);
            """
        )

def separate_pagecounter_id(state, schema):
    """
    Splits the data in pagecounter _id field of form action:guid_id:file_id:version into
    four new columns: action(char), guid(fk), file(fk), version(int)
    """
    with connection.cursor() as cursor:
        cursor.execute(
            """
            UPDATE osf_pagecounter PC
            SET (action, guid_id, file_id, version) = (split_part(PC._id, ':', 1), G.id, F.id, NULLIF(split_part(PC._id, ':', 4), '')::int)
            FROM osf_guid G, osf_basefilenode F
            WHERE PC._id LIKE '%' || G._id || '%'
            AND PC._id LIKE '%' || F._id || '%';
            """
        )

class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0144_pagecounter_index'),
    ]

    operations = [
        migrations.RunPython(
            separate_pagecounter_id, reverse_func
        ),
    ]
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-12 17:18
from __future__ import unicode_literals
from django.db import migrations


reverse_func = [
    """
    UPDATE osf_pagecounter
    SET (action, guid_id, file_id, version) = ('download', NULL, NULL, NULL);
    """
]

# Splits the data in pagecounter _id field of form action:guid_id:file_id:version into
# four new columns: action(char), guid(fk), file(fk), version(int)
separate_pagecounter_id = [
    """
    UPDATE osf_pagecounter PC
    SET (action, guid_id, file_id, version) = (split_part(PC._id, ':', 1), G.id, F.id, NULLIF(split_part(PC._id, ':', 4), '')::int)
    FROM osf_guid G, osf_basefilenode F
    WHERE PC._id LIKE '%' || G._id || '%'
    AND PC._id LIKE '%' || F._id || '%';
    """
]


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0144_pagecounter_index'),
    ]

    operations = [
        migrations.RunSQL(
            separate_pagecounter_id, reverse_func
        ),
    ]
Call RunSQL instead of RunPython in pagecounter data migration.
Call RunSQL instead of RunPython in pagecounter data migration.
Python
apache-2.0
cslzchen/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,felliott/osf.io,baylee-d/osf.io,mattclark/osf.io,mfraezz/osf.io,aaxelb/osf.io,felliott/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,mattclark/osf.io,adlius/osf.io,saradbowman/osf.io,adlius/osf.io,mfraezz/osf.io,aaxelb/osf.io,baylee-d/osf.io,adlius/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,felliott/osf.io,felliott/osf.io,mattclark/osf.io,baylee-d/osf.io,adlius/osf.io
--- +++ @@ -1,34 +1,28 @@
 # -*- coding: utf-8 -*-
 # Generated by Django 1.11.15 on 2018-11-12 17:18
 from __future__ import unicode_literals
-
-from django.db import migrations, connection
+from django.db import migrations
 
 
-def reverse_func(state, schema):
-    with connection.cursor() as cursor:
-        cursor.execute(
-            """
-            UPDATE osf_pagecounter
-            SET (action, guid_id, file_id, version) = ('download', NULL, NULL, NULL);
-            """
-        )
+reverse_func = [
+    """
+    UPDATE osf_pagecounter
+    SET (action, guid_id, file_id, version) = ('download', NULL, NULL, NULL);
+    """
+]
 
-def separate_pagecounter_id(state, schema):
+# Splits the data in pagecounter _id field of form action:guid_id:file_id:version into
+# four new columns: action(char), guid(fk), file(fk), version(int)
+separate_pagecounter_id = [
     """
-    Splits the data in pagecounter _id field of form action:guid_id:file_id:version into
-    four new columns: action(char), guid(fk), file(fk), version(int)
+    UPDATE osf_pagecounter PC
+    SET (action, guid_id, file_id, version) = (split_part(PC._id, ':', 1), G.id, F.id, NULLIF(split_part(PC._id, ':', 4), '')::int)
+    FROM osf_guid G, osf_basefilenode F
+    WHERE PC._id LIKE '%' || G._id || '%'
+    AND PC._id LIKE '%' || F._id || '%';
     """
-    with connection.cursor() as cursor:
-        cursor.execute(
-            """
-            UPDATE osf_pagecounter PC
-            SET (action, guid_id, file_id, version) = (split_part(PC._id, ':', 1), G.id, F.id, NULLIF(split_part(PC._id, ':', 4), '')::int)
-            FROM osf_guid G, osf_basefilenode F
-            WHERE PC._id LIKE '%' || G._id || '%'
-            AND PC._id LIKE '%' || F._id || '%';
-            """
-        )
+]
+
 
 class Migration(migrations.Migration):
 
@@ -37,7 +31,7 @@
     ]
 
     operations = [
-        migrations.RunPython(
+        migrations.RunSQL(
             separate_pagecounter_id, reverse_func
         ),
     ]
050394cc0c228edf8ac9c3ea6f2001d36d874841
spdypy/connection.py
spdypy/connection.py
# -*- coding: utf-8 -*-
"""
spdypy.connection
~~~~~~~~~~~~~~~~~

Contains the code necessary for working with SPDY connections.
"""
# Define some states for SPDYConnections.
NEW = 'NEW'

class SPDYConnection(object):
    """
    A representation of a single SPDY connection to a remote server. This
    object takes responsibility for managing the complexities of the SPDY
    protocol, including streams and options. This complexity is abstracted
    away into an interface that broadly looks like the standard library's
    HTTPSConnection class.

    :param host: The host to establish a connection to.
    """
    def __init__(self, host):
        self.host = host
        self._state = NEW
# -*- coding: utf-8 -*-
"""
spdypy.connection
~~~~~~~~~~~~~~~~~

Contains the code necessary for working with SPDY connections.
"""
# Define some states for SPDYConnections.
NEW = 'NEW'


class SPDYConnection(object):
    """
    A representation of a single SPDY connection to a remote server. This
    object takes responsibility for managing the complexities of the SPDY
    protocol, including streams and options. This complexity is abstracted
    away into an interface that broadly looks like the standard library's
    HTTPSConnection class.

    :param host: The host to establish a connection to.
    """
    def __init__(self, host):
        self.host = host
        self._state = NEW

    def request(self, method, url, body=None, headers={}):
        """
        This will send a request to the server using the HTTP request method
        ``method`` and the selector ``url``. If the ``body`` argument is
        present, it should be a string or bytes object of data to send after
        the headers are finished. Strings are encoded as ISO-8859-1, the
        default charset for HTTP. To use other encodings, pass a bytes object.
        The Content-Length header is set to the length of the string.

        The ``headers`` object should be a mapping of extra HTTP headers to
        send with the request.
        """
        pass
Define the shell of the request method.
Define the shell of the request method.
Python
mit
Lukasa/spdypy
--- +++ @@ -7,6 +7,7 @@
 """
 # Define some states for SPDYConnections.
 NEW = 'NEW'
+
 
 class SPDYConnection(object):
     """
@@ -21,3 +22,17 @@
     def __init__(self, host):
         self.host = host
         self._state = NEW
+
+    def request(self, method, url, body=None, headers={}):
+        """
+        This will send a request to the server using the HTTP request method
+        ``method`` and the selector ``url``. If the ``body`` argument is
+        present, it should be a string or bytes object of data to send after
+        the headers are finished. Strings are encoded as ISO-8859-1, the
+        default charset for HTTP. To use other encodings, pass a bytes object.
+        The Content-Length header is set to the length of the string.
+
+        The ``headers`` object should be a mapping of extra HTTP headers to
+        send with the request.
+        """
+        pass
e3a298bcbe0eac1d2a0ade13244b0f3650bd6c49
pyinfra/pseudo_modules.py
pyinfra/pseudo_modules.py
# pyinfra
# File: pyinfra/pseudo_modules.py
# Desc: essentially a hack that provides dynamic imports for the current deploy (CLI only)

import sys

import pyinfra

class PseudoModule(object):
    _module = None

    def __getattr__(self, key):
        return getattr(self._module, key)

    def __setattr__(self, key, value):
        if key == '_module':
            return object.__setattr__(self, key, value)

        setattr(self._module, key, value)

    def __iter__(self):
        return iter(self._module)

    def set(self, module):
        self._module = module

    def reset(self):
        self._module = None

    def isset(self):
        return self._module is not None


# The current deploy state
sys.modules['pyinfra.pseudo_state'] = sys.modules['pyinfra.state'] = \
    pyinfra.pseudo_state = pyinfra.state = \
    PseudoModule()

# The current deploy inventory
sys.modules['pyinfra.pseudo_inventory'] = sys.modules['pyinfra.inventory'] = \
    pyinfra.pseudo_inventory = pyinfra.inventory = \
    PseudoModule()

# The current target host
sys.modules['pyinfra.pseudo_host'] = sys.modules['pyinfra.host'] = \
    pyinfra.pseudo_host = pyinfra.host = \
    PseudoModule()
# pyinfra
# File: pyinfra/pseudo_modules.py
# Desc: essentially a hack that provides dynamic imports for the current deploy (CLI only)

import sys

import pyinfra

class PseudoModule(object):
    _module = None

    def __getattr__(self, key):
        return getattr(self._module, key)

    def __setattr__(self, key, value):
        if key == '_module':
            return object.__setattr__(self, key, value)

        setattr(self._module, key, value)

    def __getitem__(self, key):
        return self._module[key]

    def __iter__(self):
        return iter(self._module)

    def __len__(self):
        return len(self._module)

    def set(self, module):
        self._module = module

    def reset(self):
        self._module = None

    def isset(self):
        return self._module is not None


# The current deploy state
sys.modules['pyinfra.pseudo_state'] = sys.modules['pyinfra.state'] = \
    pyinfra.pseudo_state = pyinfra.state = \
    PseudoModule()

# The current deploy inventory
sys.modules['pyinfra.pseudo_inventory'] = sys.modules['pyinfra.inventory'] = \
    pyinfra.pseudo_inventory = pyinfra.inventory = \
    PseudoModule()

# The current target host
sys.modules['pyinfra.pseudo_host'] = sys.modules['pyinfra.host'] = \
    pyinfra.pseudo_host = pyinfra.host = \
    PseudoModule()
Add getitem and len support for pseudo modules.
Add getitem and len support for pseudo modules.
Python
mit
Fizzadar/pyinfra,Fizzadar/pyinfra
--- +++ @@ -18,8 +18,14 @@
 
         setattr(self._module, key, value)
 
+    def __getitem__(self, key):
+        return self._module[key]
+
     def __iter__(self):
         return iter(self._module)
+
+    def __len__(self):
+        return len(self._module)
 
     def set(self, module):
         self._module = module
e83019f67a3c93efac27566666bcff5eb0d2a0da
examples/autopost/auto_post.py
examples/autopost/auto_post.py
import time
import sys
import os
import glob

sys.path.append(os.path.join(sys.path[0], '../../'))
from instabot import Bot

posted_pic_list = []
try:
    with open('pics.txt', 'r') as f:
        posted_pic_list = f.read().splitlines()
except Exception:
    posted_pic_list = []

timeout = 24 * 60 * 60  # pics will be posted every 24 hours

bot = Bot()
bot.login()

while True:
    pics = glob.glob("./pics/*.jpg")
    pics = sorted(pics)

    try:
        for pic in pics:
            if pic in posted_pic_list:
                continue

            caption = pic[:-4].split(" ")
            caption = " ".join(caption[1:])
            print("upload: " + caption)
            bot.uploadPhoto(pic, caption=caption)
            if bot.LastResponse.status_code != 200:
                print(bot.LastResponse)
                # snd msg
                break

            if pic not in posted_pic_list:
                posted_pic_list.append(pic)
                with open('pics.txt', 'a') as f:
                    f.write(pic + "\n")

        time.sleep(timeout)

    except Exception as e:
        print(str(e))
        time.sleep(60)
import glob
import os
import sys
import time

from io import open

sys.path.append(os.path.join(sys.path[0], '../../'))
from instabot import Bot

posted_pic_list = []
try:
    with open('pics.txt', 'r', encoding='utf8') as f:
        posted_pic_list = f.read().splitlines()
except Exception:
    posted_pic_list = []

timeout = 24 * 60 * 60  # pics will be posted every 24 hours

bot = Bot()
bot.login()

while True:
    pics = glob.glob("./pics/*.jpg")
    pics = sorted(pics)

    try:
        for pic in pics:
            if pic in posted_pic_list:
                continue

            caption = pic[:-4].split(" ")
            caption = " ".join(caption[1:])
            print("upload: " + caption)
            bot.uploadPhoto(pic, caption=caption)
            if bot.LastResponse.status_code != 200:
                print(bot.LastResponse)
                # snd msg
                break

            if pic not in posted_pic_list:
                posted_pic_list.append(pic)
                with open('pics.txt', 'a', encoding='utf8') as f:
                    f.write(pic + "\n")

        time.sleep(timeout)

    except Exception as e:
        print(str(e))
        time.sleep(60)
Add utf-8 in autopost example
Add utf-8 in autopost example
Python
apache-2.0
instagrambot/instabot,ohld/instabot,instagrambot/instabot
--- +++ @@ -1,14 +1,16 @@
+import glob
+import os
+import sys
 import time
-import sys
-import os
-import glob
+
+from io import open
 
 sys.path.append(os.path.join(sys.path[0], '../../'))
 from instabot import Bot
 
 posted_pic_list = []
 try:
-    with open('pics.txt', 'r') as f:
+    with open('pics.txt', 'r', encoding='utf8') as f:
         posted_pic_list = f.read().splitlines()
 except Exception:
     posted_pic_list = []
@@ -38,7 +40,7 @@
 
         if pic not in posted_pic_list:
             posted_pic_list.append(pic)
-            with open('pics.txt', 'a') as f:
+            with open('pics.txt', 'a', encoding='utf8') as f:
                 f.write(pic + "\n")
 
         time.sleep(timeout)