commit stringlengths 40 40 | old_file stringlengths 4 150 | new_file stringlengths 4 150 | old_contents stringlengths 0 3.26k | new_contents stringlengths 1 4.43k | subject stringlengths 15 501 | message stringlengths 15 4.06k | lang stringclasses 4 values | license stringclasses 13 values | repos stringlengths 5 91.5k | diff stringlengths 0 4.35k |
|---|---|---|---|---|---|---|---|---|---|---|
e743bcddbc53d51142f3e1277919a3f65afaad90 | tests/conftest.py | tests/conftest.py | import base64
import betamax
import os
credentials = [os.environ.get('GH_USER', 'foo').encode(),
os.environ.get('GH_PASSWORD', 'bar').encode()]
with betamax.Betamax.configure() as config:
config.cassette_library_dir = 'tests/cassettes'
record_mode = 'never' if os.environ.get('TRAVIS_GH3') else 'once'
print('Record mode: {0}'.format(record_mode))
config.default_cassette_options['record_mode'] = record_mode
config.define_cassette_placeholder(
'<AUTH_TOKEN>',
os.environ.get('GH_AUTH', 'x' * 20)
)
config.define_cassette_placeholder(
'<BASIC_AUTH>',
base64.b64encode(b':'.join(credentials)).decode()
)
| import base64
import betamax
import os
credentials = [os.environ.get('GH_USER', 'foo').encode(),
os.environ.get('GH_PASSWORD', 'bar').encode()]
with betamax.Betamax.configure() as config:
config.cassette_library_dir = 'tests/cassettes'
record_mode = 'never' if os.environ.get('TRAVIS_GH3') else 'once'
config.default_cassette_options['record_mode'] = record_mode
config.define_cassette_placeholder(
'<AUTH_TOKEN>',
os.environ.get('GH_AUTH', 'x' * 20)
)
config.define_cassette_placeholder(
'<BASIC_AUTH>',
base64.b64encode(b':'.join(credentials)).decode()
)
| Revert "For travis, let us print the mode" | Revert "For travis, let us print the mode"
This reverts commit 0c8e9c36219214cf08b33c0ff1812e6cefa53353.
| Python | bsd-3-clause | sigmavirus24/github3.py,agamdua/github3.py,christophelec/github3.py,jim-minter/github3.py,ueg1990/github3.py,degustaf/github3.py,balloob/github3.py,h4ck3rm1k3/github3.py,icio/github3.py,wbrefvem/github3.py,krxsky/github3.py,itsmemattchung/github3.py | ---
+++
@@ -9,7 +9,6 @@
config.cassette_library_dir = 'tests/cassettes'
record_mode = 'never' if os.environ.get('TRAVIS_GH3') else 'once'
- print('Record mode: {0}'.format(record_mode))
config.default_cassette_options['record_mode'] = record_mode
|
166ca44e70221e35a8e0bd36b5b21ba51f7032ca | FAUSTPy/__init__.py | FAUSTPy/__init__.py | """
A set of classes used to dynamically wrap FAUST DSP programs in Python.
This package defines three types:
- PythonUI is an implementation of the UIGlue C struct.
- FAUSTDsp wraps the DSP struct.
- FAUST integrates the other two, sets up the CFFI environment (defines the
data types and API) and compiles the FAUST program. This is the class you
most likely want to use.
"""
from . wrapper import FAUST
from . python_ui import PythonUI, param
from . python_dsp import FAUSTDsp
__all__ = ["FAUST", "PythonUI", "FAUSTDsp", "param", "wrapper"]
| #/usr/bin/env python
"""
A set of classes used to dynamically wrap FAUST DSP programs in Python.
This package defines three types:
- PythonUI is an implementation of the UIGlue C struct.
- FAUSTDsp wraps the DSP struct.
- FAUST integrates the other two, sets up the CFFI environment (defines the
data types and API) and compiles the FAUST program. This is the class you
most likely want to use.
"""
from . wrapper import FAUST
from . python_ui import PythonUI, param
from . python_dsp import FAUSTDsp
__all__ = ["FAUST", "PythonUI", "FAUSTDsp", "param", "wrapper"]
| Add shebang line to package init file. | Add shebang line to package init file.
| Python | mit | marcecj/faust_python | ---
+++
@@ -1,3 +1,5 @@
+#/usr/bin/env python
+
"""
A set of classes used to dynamically wrap FAUST DSP programs in Python.
|
63a23acabd83cbf32c85ad667da0aed9ae0599e6 | unitTestUtils/parseXML.py | unitTestUtils/parseXML.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from xml.etree.ElementTree import ParseError
import xml.etree.ElementTree as ET
import glob
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def parse():
for infile in glob.glob('*.xml'):
try:
tree = ET.parse(infile)
root = tree.getroot()
if root.findall('.//FatalError'):
eprint("Error detected")
print(infile)
sys.exit(1)
except ParseError:
eprint("The file xml isn't correct. There were some mistakes in the tests ")
sys.exit(1)
def main():
parse()
if __name__ == '__main__':
main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from xml.etree.ElementTree import ParseError
import xml.etree.ElementTree as ET
import glob
import sys
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def parse():
for infile in glob.glob('*.xml'):
try:
tree = ET.parse(infile)
root = tree.getroot()
if root.findall('.//FatalError'):
element=root.findall('.//FatalError')[0]
eprint("Error detected")
print(infile)
print(element.text)
sys.exit(1)
except ParseError:
eprint("The file xml isn't correct. There were some mistakes in the tests ")
sys.exit(1)
def main():
parse()
if __name__ == '__main__':
main()
| Add more verbose error to reporte on Travis parserXML.py | Add more verbose error to reporte on Travis parserXML.py
| Python | apache-2.0 | wkrzemien/j-pet-framework,wkrzemien/j-pet-framework,wkrzemien/j-pet-framework,wkrzemien/j-pet-framework,wkrzemien/j-pet-framework | ---
+++
@@ -16,8 +16,10 @@
tree = ET.parse(infile)
root = tree.getroot()
if root.findall('.//FatalError'):
+ element=root.findall('.//FatalError')[0]
eprint("Error detected")
print(infile)
+ print(element.text)
sys.exit(1)
except ParseError:
eprint("The file xml isn't correct. There were some mistakes in the tests ") |
1916d1effbad1a26b95594b909c30b3732df3580 | polling_stations/apps/data_collection/management/commands/import_shape_shape.py | polling_stations/apps/data_collection/management/commands/import_shape_shape.py | """
Import COUNCIL
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from COUNCIL
"""
council_id = 'your_councilid'
districts_name = 'name_without_.shp'
stations_name = 'your.shp'
def district_record_to_dict(self, record):
print 'District:', record
sys.exit(1)
return {
'internal_council_id': record[0],
'name': record[1],
}
def station_record_to_dict(self, record):
print 'Station', record
sys.exit(1)
return {
'internal_council_id': record.internal_id,
'postcode' : record.address.split(',')[-1],
'address' : "\n".join(record.address.split(',')[:-1]),
}
| """
Import COUNCIL
"""
import sys
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from COUNCIL
"""
council_id = 'your_councilid'
districts_name = 'name_without_.shp'
stations_name = 'your.shp'
def district_record_to_dict(self, record):
print 'District:', record
sys.exit(1)
return {
'internal_council_id': record[0],
'name': record[1],
}
def station_record_to_dict(self, record):
print 'Station', record
sys.exit(1)
return {
'internal_council_id': record[0],
'postcode' : record[1],
'address' : record[2]
}
| Update Shape&Shape importer - shapes don't have named fields! | Update Shape&Shape importer - shapes don't have named fields!
| Python | bsd-3-clause | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations | ---
+++
@@ -25,8 +25,8 @@
print 'Station', record
sys.exit(1)
return {
- 'internal_council_id': record.internal_id,
- 'postcode' : record.address.split(',')[-1],
- 'address' : "\n".join(record.address.split(',')[:-1]),
+ 'internal_council_id': record[0],
+ 'postcode' : record[1],
+ 'address' : record[2]
}
|
cea8c101c64a33b2daedc9b8561a7d8e38f0b958 | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='WsgiService',
version='0.2',
description="A lean WSGI framework for easy creation of REST services",
author="Patrice Neff",
url='http://github.com/pneff/wsgiservice/tree/master',
packages=find_packages(),
install_requires=[
'decorator',
'webob',
],
tests_require=[
'nose',
'mox',
],
test_suite='nose.collector',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
]
)
| from setuptools import setup, find_packages
setup(
name='WsgiService',
version='0.2.1',
description="A lean WSGI framework for easy creation of REST services",
author="Patrice Neff",
url='http://github.com/pneff/wsgiservice/tree/master',
packages=find_packages(),
install_requires=[
'decorator',
'webob',
],
tests_require=[
'nose',
'mox',
],
test_suite='nose.collector',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.6',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
]
)
| Increment version to 0.2.1 (unreleased) for all further changes. | Increment version to 0.2.1 (unreleased) for all further changes.
| Python | bsd-2-clause | pneff/wsgiservice,beekpr/wsgiservice | ---
+++
@@ -1,7 +1,7 @@
from setuptools import setup, find_packages
setup(
name='WsgiService',
- version='0.2',
+ version='0.2.1',
description="A lean WSGI framework for easy creation of REST services",
author="Patrice Neff",
url='http://github.com/pneff/wsgiservice/tree/master', |
680e8472416e71da5fff41032db49398f62e52d7 | setup.py | setup.py | import sys
import os
from setuptools import setup
long_description = open('README.rst').read()
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
setup_kwargs = dict(
name='powershift-cli',
version='1.1.2',
description='Pluggable command line client for OpenShift.',
long_description=long_description,
url='https://github.com/getwarped/powershift-cli',
author='Graham Dumpleton',
author_email='Graham.Dumpleton@gmail.com',
license='BSD',
classifiers=classifiers,
keywords='openshift kubernetes',
packages=['powershift', 'powershift.cli'],
package_dir={'powershift': 'src/powershift'},
package_data={'powershift.cli': ['completion-bash.sh']},
entry_points = {'console_scripts':['powershift = powershift.cli:main']},
install_requires=['click'],
extras_require={'cluster': ['powershift-cluster>=1.1.1']},
)
setup(**setup_kwargs)
| import sys
import os
from setuptools import setup
long_description = open('README.rst').read()
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
setup_kwargs = dict(
name='powershift-cli',
version='1.1.2',
description='Pluggable command line client for OpenShift.',
long_description=long_description,
url='https://github.com/getwarped/powershift-cli',
author='Graham Dumpleton',
author_email='Graham.Dumpleton@gmail.com',
license='BSD',
classifiers=classifiers,
keywords='openshift kubernetes',
packages=['powershift', 'powershift.cli'],
package_dir={'powershift': 'src/powershift'},
package_data={'powershift.cli': ['completion-bash.sh']},
entry_points = {'console_scripts':['powershift = powershift.cli:main']},
install_requires=['click'],
extras_require={'all':['powershift-cluster>=1.1.1'],
'cluster':['powershift-cluster>=1.1.1']},
)
setup(**setup_kwargs)
| Add 'all' option for extra packages, which will install all available rather than listing individually. | Add 'all' option for extra packages, which will install all available rather than listing individually.
| Python | bsd-2-clause | getwarped/powershift-cli,getwarped/powershift-cli | ---
+++
@@ -33,7 +33,8 @@
package_data={'powershift.cli': ['completion-bash.sh']},
entry_points = {'console_scripts':['powershift = powershift.cli:main']},
install_requires=['click'],
- extras_require={'cluster': ['powershift-cluster>=1.1.1']},
+ extras_require={'all':['powershift-cluster>=1.1.1'],
+ 'cluster':['powershift-cluster>=1.1.1']},
)
setup(**setup_kwargs) |
071d8df8a985c77397c5b2caf27722101aaefea1 | setup.py | setup.py | #!/usr/bin/python3
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gorynych',
version='0.0.1',
description='Automatic data collecting and refining system',
long_description=long_description,
url='https://github.com/vurmux/gorynych',
author='Andrey Voronov',
author_email='vurmux@gmail.com',
license='Apache',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Information Technology',
'Topic :: Internet',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
],
keywords='scraping nlp',
packages=find_packages(exclude=['docs', 'img']),
install_requires=['beautifulsoup4', 'requests', 'nltk'],
extras_require={
'dev': [],
'test': ['coverage'],
},
package_data={
'gorynych': ['*.txt', '*.json'],
},
entry_points={
'console_scripts': [
'gorynych-daemon=gorynych.gorynych_daemon:main',
],
},
)
| #!/usr/bin/python3
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gorynych',
version='0.0.1',
description='Automatic data collecting and refining system',
long_description=long_description,
url='https://github.com/vurmux/gorynych',
author='Andrey Voronov',
author_email='vurmux@gmail.com',
license='Apache',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Information Technology',
'Topic :: Internet',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
],
keywords='scraping nlp',
packages=find_packages(exclude=['docs', 'img']),
install_requires=['beautifulsoup4', 'requests', 'nltk', 'schedule'],
extras_require={
'dev': [],
'test': ['coverage'],
},
package_data={
'gorynych': ['*.txt', '*.json'],
},
entry_points={
'console_scripts': [
'gorynych-daemon=gorynych.gorynych_daemon:main',
],
},
)
| Add missed 'schedule' package to install_requires | Add missed 'schedule' package to install_requires
| Python | apache-2.0 | vurmux/gorynych | ---
+++
@@ -29,7 +29,7 @@
],
keywords='scraping nlp',
packages=find_packages(exclude=['docs', 'img']),
- install_requires=['beautifulsoup4', 'requests', 'nltk'],
+ install_requires=['beautifulsoup4', 'requests', 'nltk', 'schedule'],
extras_require={
'dev': [],
'test': ['coverage'], |
2ceb5601a98b825276d6199a140277c18c2e4c5d | setup.py | setup.py | from distutils.core import setup
setup(
name='planet_alignment',
version='1.0.0',
packages=['planet_alignment', 'planet_alignment.test'],
url='https://github.com/paulfanelli/planet_alignment.git',
license='MIT',
author='Paul Fanelli',
author_email='paul.fanelli@gmail.com',
description='Planet Alignment program',
requires=['bunch', 'zope.interface', 'PyYAML'],
tests_require=['pytest'],
entry_points={
'console_scripts': [
'alignment = planet_alignment.__main__:main'
]
}
)
| from distutils.core import setup
setup(
name='planet_alignment',
version='1.0.0',
packages=['planet_alignment', 'planet_alignment.test'],
url='https://github.com/paulfanelli/planet_alignment.git',
license='MIT',
author='Paul Fanelli',
author_email='paul.fanelli@gmail.com',
description='Planet Alignment program',
requires=['bunch', 'zope.interface', 'PyYAML'],
tests_require=['pytest'],
entry_points={
'console_scripts': [
'planet_alignment = planet_alignment.__main__:main'
]
}
)
| Change the console script name to the same name as the project. | Change the console script name to the same name as the project.
| Python | mit | paulfanelli/planet_alignment | ---
+++
@@ -13,7 +13,7 @@
tests_require=['pytest'],
entry_points={
'console_scripts': [
- 'alignment = planet_alignment.__main__:main'
+ 'planet_alignment = planet_alignment.__main__:main'
]
}
) |
8e24be46bfcb59dc8a4a9ba8d73f21aa32d18683 | setup.py | setup.py | from setuptools import find_packages
from setuptools import setup
with open('requirements.txt') as fobj:
install_requires = [line.strip() for line in fobj]
with open('README.rst') as fobj:
long_description = fobj.read()
with open('version.txt') as fobj:
version = fobj.read().strip()
packages = find_packages(exclude=['tests*'])
scripts = [
'runserver.py',
'registerclient.py',
]
setup(
name='opwen_email_server',
version=version,
author='Clemens Wolff',
author_email='clemens.wolff+pypi@gmail.com',
packages=packages,
url='https://github.com/ascoderu/opwen-cloudserver',
license='License :: OSI Approved :: Apache Software License',
description='Email server for the Opwen project',
long_description=long_description,
scripts=scripts,
include_package_data=True,
install_requires=install_requires)
| from setuptools import find_packages
from setuptools import setup
with open('requirements.txt') as fobj:
install_requires = [line.strip() for line in fobj]
with open('README.rst') as fobj:
long_description = fobj.read()
with open('version.txt') as fobj:
version = fobj.read().strip()
packages = find_packages(exclude=['tests*'])
scripts = [
'runserver.py',
'registerclient.py',
]
setup(
name='opwen_email_server',
version=version,
author='Clemens Wolff',
author_email='clemens.wolff+pypi@gmail.com',
packages=packages,
url='https://github.com/ascoderu/opwen-cloudserver',
license='Apache Software License',
description='Email server for the Opwen project',
long_description=long_description,
scripts=scripts,
include_package_data=True,
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Communications :: Email',
])
| Add some PyPI tags to package | Add some PyPI tags to package
| Python | apache-2.0 | ascoderu/opwen-cloudserver,ascoderu/opwen-cloudserver | ---
+++
@@ -28,9 +28,16 @@
author_email='clemens.wolff+pypi@gmail.com',
packages=packages,
url='https://github.com/ascoderu/opwen-cloudserver',
- license='License :: OSI Approved :: Apache Software License',
+ license='Apache Software License',
description='Email server for the Opwen project',
long_description=long_description,
scripts=scripts,
include_package_data=True,
- install_requires=install_requires)
+ install_requires=install_requires,
+ classifiers=[
+ 'Development Status :: 3 - Alpha',
+ 'Environment :: Web Environment',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Programming Language :: Python :: 3',
+ 'Topic :: Communications :: Email',
+ ]) |
6463ae4da70e27d85a6036d5094548679742a2fe | setup.py | setup.py | from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='ETLT',
version='0.0.18',
description='Extract Transform Load Transform ',
# long_description=long_description,
url='https://github.com/SetBased/py-etlt',
author='Paul Water',
author_email='info@setbased.nl',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Database'
],
keywords='ETLT, ETL, ELT, Extract, Transform, Load, DWH, Data, Warehouse',
packages=find_packages(exclude=['build', 'test']),
install_requires=[]
)
| from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ETLT',
version='0.0.18',
description='Extract Transform Load - but not in that strict order',
long_description=long_description,
url='https://github.com/SetBased/py-etlt',
author='Paul Water',
author_email='info@setbased.nl',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Database'
],
keywords='ETLT, ETL, ELT, Extract, Transform, Load, DWH, Data, Warehouse',
packages=find_packages(exclude=['build', 'test']),
install_requires=[]
)
| Update long description of package. | Update long description of package.
| Python | mit | SetBased/py-etlt | ---
+++
@@ -4,16 +4,16 @@
here = path.abspath(path.dirname(__file__))
-# with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
-# long_description = f.read()
+with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
+ long_description = f.read()
setup(
name='ETLT',
version='0.0.18',
- description='Extract Transform Load Transform ',
- # long_description=long_description,
+ description='Extract Transform Load - but not in that strict order',
+ long_description=long_description,
url='https://github.com/SetBased/py-etlt',
|
15c7cc3cf1599efa65896e7138f3015e68ae5998 | setup.py | setup.py | #!/usr/bin/env python
import sys
from setuptools import setup, find_packages
requires = ['six']
if sys.version_info[0] == 2:
requires += ['python-dateutil>=1.0, <2.0, >=2.1']
else:
# Py3k
requires += ['python-dateutil>=2.0']
setup(
name='freezegun',
version='0.2.2',
description='Let your Python tests travel through time',
author='Steve Pulec',
author_email='spulec@gmail',
url='https://github.com/spulec/freezegun',
packages=find_packages(exclude=("tests", "tests.*",)),
install_requires=requires,
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
| #!/usr/bin/env python
import sys
from setuptools import setup, find_packages
requires = ['six']
if sys.version_info[0] == 2:
requires += ['python-dateutil>=1.0, != 2.0']
else:
# Py3k
requires += ['python-dateutil>=2.0']
setup(
name='freezegun',
version='0.2.2',
description='Let your Python tests travel through time',
author='Steve Pulec',
author_email='spulec@gmail',
url='https://github.com/spulec/freezegun',
packages=find_packages(exclude=("tests", "tests.*",)),
install_requires=requires,
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
| Use a saner requirements for python-dateutil | Use a saner requirements for python-dateutil
The requirement >=1.0, <2.0, >=2.1 doesn't make a lot of logical sense and it
will break in the future. There is no version that is >= 1.0, and < 2.0, and
>= 2.1 becasue these versions are mutually exclusive. Even if you interpret
the , as OR it still doesn't make sense because this includes every version.
What this spec is actually trying to represent is any version >= 1.0 but not
2.0, so instead we'll just say that. | Python | apache-2.0 | spulec/freezegun,Affirm/freezegun,adamchainz/freezegun,Sun77789/freezegun | ---
+++
@@ -6,7 +6,7 @@
requires = ['six']
if sys.version_info[0] == 2:
- requires += ['python-dateutil>=1.0, <2.0, >=2.1']
+ requires += ['python-dateutil>=1.0, != 2.0']
else:
# Py3k
requires += ['python-dateutil>=2.0'] |
f1474d683b068356567e316c7a1fa177404ae7ab | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import setup, find_packages
from axes import get_version
setup(
name='django-axes',
version=get_version(),
description="Keep track of failed login attempts in Django-powered sites.",
long_description=(
codecs.open("README.rst", encoding='utf-8').read() + '\n' +
codecs.open("CHANGES.txt", encoding='utf-8').read()),
keywords='authentication django pci security'.split(),
author='Josh VanderLinden, Philip Neustrom, Michael Blume, Camilo Nova',
author_email='codekoala@gmail.com',
maintainer='Alex Clark',
maintainer_email='aclark@aclark.net',
url='https://github.com/django-pci/django-axes',
license='MIT',
package_dir={'axes': 'axes'},
install_requires=['pytz', 'django-appconf'],
include_package_data=True,
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Internet :: Log Analysis',
'Topic :: Security',
'Topic :: System :: Logging',
],
zip_safe=False,
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import setup, find_packages
from axes import get_version
setup(
name='django-axes',
version=get_version(),
description="Keep track of failed login attempts in Django-powered sites.",
long_description=(
codecs.open("README.rst", encoding='utf-8').read() + '\n' +
codecs.open("CHANGES.txt", encoding='utf-8').read()),
keywords='authentication django pci security'.split(),
author='Josh VanderLinden, Philip Neustrom, Michael Blume, Camilo Nova',
author_email='codekoala@gmail.com',
maintainer='Alex Clark',
maintainer_email='aclark@aclark.net',
url='https://github.com/django-pci/django-axes',
license='MIT',
package_dir={'axes': 'axes'},
install_requires=['pytz', 'django-appconf'],
include_package_data=True,
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: Log Analysis',
'Topic :: Security',
'Topic :: System :: Logging',
],
zip_safe=False,
)
| Remove support for python 2.x | Remove support for python 2.x
| Python | mit | django-pci/django-axes,jazzband/django-axes | ---
+++
@@ -32,7 +32,6 @@
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Internet :: Log Analysis',
'Topic :: Security', |
22cb266fcec5c0b3b299a3a974e79810d5ecdbaf | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='wagtailcodeblock',
version="0.2.2",
description='Wagtail Code Block provides PrismJS syntax highlighting in Wagtail.',
long_description='A work-in-progress alpha of a Wagtail Streamfield block for source code with real-time syntax highlighting.',
author='Tim Allen',
author_email='tallen@wharton.upenn.edu',
url='https://github.com/FlipperPA/wagtailcodeblock',
include_package_data=True,
packages=find_packages(),
zip_safe=False,
install_requires=[
'wagtail>=1.8',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| from setuptools import setup, find_packages
setup(
name='wagtailcodeblock',
version="0.2.3",
description='Wagtail Code Block provides PrismJS syntax highlighting in Wagtail.',
long_description='A work-in-progress alpha of a Wagtail Streamfield block for source code with real-time syntax highlighting.',
author='Tim Allen',
author_email='tallen@wharton.upenn.edu',
url='https://github.com/FlipperPA/wagtailcodeblock',
include_package_data=True,
packages=find_packages(),
zip_safe=False,
install_requires=[
'wagtail>=1.8',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| Switch to local JS/CSS assets - bump version. | Switch to local JS/CSS assets - bump version.
| Python | bsd-3-clause | FlipperPA/wagtailcodeblock,FlipperPA/wagtailcodeblock,FlipperPA/wagtailcodeblock | ---
+++
@@ -1,7 +1,7 @@
from setuptools import setup, find_packages
setup(
name='wagtailcodeblock',
- version="0.2.2",
+ version="0.2.3",
description='Wagtail Code Block provides PrismJS syntax highlighting in Wagtail.',
long_description='A work-in-progress alpha of a Wagtail Streamfield block for source code with real-time syntax highlighting.',
author='Tim Allen', |
2320fba5befcca313626fac1572b5245f454d675 | setup.py | setup.py | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
from oscar_support import get_version
def location(path):
return os.path.join(os.path.dirname(__file__))
setup(
name='django-oscar-support',
version=get_version(),
url='https://github.com/tangentlabs/django-oscar-support',
author="Sebastian Vetter",
author_email="sebastian.vetter@tangentsnowball.com.au",
description="Ticketing and customer support for Oscar",
long_description=open(location('README.rst')).read(),
keywords="django, oscar, e-commerce, customer support, issue tracking",
license='BSD',
platforms=['linux'],
packages=find_packages(exclude=["sandbox*", "tests*"]),
include_package_data=True,
install_requires=[
'django-shortuuidfield',
'Django>=1.4',
'django-oscar',
'django-extensions',
'django-extra-views>=0.5.2',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python'
]
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
from oscar_support import get_version
setup(
name='django-oscar-support',
version=get_version(),
url='https://github.com/tangentlabs/django-oscar-support',
author="Sebastian Vetter",
author_email="sebastian.vetter@tangentsnowball.com.au",
description="Ticketing and customer support for Oscar",
long_description=open('README.rst').read(),
keywords="django, oscar, e-commerce, customer support, issue tracking",
license='BSD',
platforms=['linux'],
packages=find_packages(exclude=["sandbox*", "tests*"]),
include_package_data=True,
install_requires=[
'django-shortuuidfield',
'Django>=1.4',
'django-oscar',
'django-extensions',
'django-extra-views>=0.5.2',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python'
]
)
| Fix long description in README | Fix long description in README
| Python | bsd-3-clause | snowball-one/django-oscar-support,snowball-one/django-oscar-support,snowball-one/django-oscar-support | ---
+++
@@ -1,13 +1,7 @@
#!/usr/bin/env python
-import os
-
from setuptools import setup, find_packages
from oscar_support import get_version
-
-
-def location(path):
- return os.path.join(os.path.dirname(__file__))
setup(
@@ -17,7 +11,7 @@
author="Sebastian Vetter",
author_email="sebastian.vetter@tangentsnowball.com.au",
description="Ticketing and customer support for Oscar",
- long_description=open(location('README.rst')).read(),
+ long_description=open('README.rst').read(),
keywords="django, oscar, e-commerce, customer support, issue tracking",
license='BSD',
platforms=['linux'], |
518fdf73b0d009f8bbe46054154f293ba209a544 | setup.py | setup.py | from setuptools import find_packages, setup
import os
import re
def read(*parts):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, *parts)) as f:
return f.read()
VERSION = re.search(
"^__version__ = '(.*)'$",
read('src', 'http_crawler', '__init__.py'),
re.MULTILINE
).group(1)
if __name__ == '__main__':
setup(
name='http-crawler',
version=VERSION,
description='A library for crawling websites',
long_description=read('README.rst'),
packages=find_packages(where='src'),
package_dir={'': 'src'},
install_requires=['lxml', 'requests', 'tinycss2'],
url='http://github.com/inglesp/http-crawler',
author='Peter Inglesby',
author_email='peter.inglesby@gmail.com',
license='License :: OSI Approved :: MIT License',
)
| from setuptools import find_packages, setup
import os
import re
def read(*parts):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, *parts)) as f:
return f.read()
VERSION = re.search(
"^__version__ = '(.*)'$",
read('src', 'http_crawler', '__init__.py'),
re.MULTILINE
).group(1)
if __name__ == '__main__':
setup(
name='http-crawler',
version=VERSION,
description='A library for crawling websites',
long_description=read('README.rst'),
packages=find_packages(where='src'),
package_dir={'': 'src'},
install_requires=['lxml', 'requests', 'tinycss2'],
url='http://github.com/inglesp/http-crawler',
author='Peter Inglesby',
author_email='peter.inglesby@gmail.com',
license='License :: OSI Approved :: MIT License',
)
| Add line to make flake8 happy | Add line to make flake8 happy | Python | mit | inglesp/http-crawler,inglesp/http-crawler,inglesp/http-crawler | ---
+++
@@ -8,6 +8,7 @@
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, *parts)) as f:
return f.read()
+
VERSION = re.search(
"^__version__ = '(.*)'$", |
146aedd8b65c1fb110f16579bd812d7adad3cc1f | setup.py | setup.py | # -*- encoding: utf-8 -*-
from setuptools import setup
description = """
Simple, powerfull and nonobstructive django email middleware.
"""
setup(
name="djmail",
url="https://github.com/niwibe/djmail",
author="Andrey Antukh",
author_email="niwi@niwi.be",
version="0.8",
packages=[
"djmail",
"djmail.backends",
"djmail.management",
"djmail.management.commands",
],
description=description.strip(),
zip_safe=False,
include_package_data=True,
package_data={
"": ["*.html"],
},
classifiers=[
# "Development Status :: 5 - Production/Stable",
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
| # -*- encoding: utf-8 -*-
from setuptools import find_packages, setup
description = """
Simple, powerfull and nonobstructive django email middleware.
"""
setup(
name="djmail",
url="https://github.com/niwibe/djmail",
author="Andrey Antukh",
author_email="niwi@niwi.be",
version="0.8",
packages=find_packages(include=['djmail*']),
description=description.strip(),
zip_safe=False,
include_package_data=True,
package_data={
"": ["*.html"],
},
classifiers=[
# "Development Status :: 5 - Production/Stable",
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
| Replace hardcoded packages by find_packages | Replace hardcoded packages by find_packages
| Python | bsd-3-clause | CloudNcodeInc/djmail,CloudNcodeInc/djmail,CloudNcodeInc/djmail | ---
+++
@@ -1,6 +1,6 @@
# -*- encoding: utf-8 -*-
-from setuptools import setup
+from setuptools import find_packages, setup
description = """
Simple, powerfull and nonobstructive django email middleware.
@@ -12,12 +12,7 @@
author="Andrey Antukh",
author_email="niwi@niwi.be",
version="0.8",
- packages=[
- "djmail",
- "djmail.backends",
- "djmail.management",
- "djmail.management.commands",
- ],
+ packages=find_packages(include=['djmail*']),
description=description.strip(),
zip_safe=False,
include_package_data=True, |
2b5a13a32dce747d9dc9f5611aadf40969239cf0 | setup.py | setup.py | from setuptools import setup, find_packages
import sys, os
setup(name='cc.license',
version='0.01',
description="License selection based on ccREL-based metadata.",
classifiers=[],
keywords='',
author='Creative Commons',
author_email='software@creativecommons.org',
url='http://wiki.creativecommons.org/CcLicense',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
#package_data={'cc.license': ['*.xml', '*.txt']}, # doesn't work
data_files=[('cc/license/rdf', ['license.rdf/rdf/index.rdf',
'license.rdf/rdf/selectors.rdf',
'license.rdf/rdf/jurisdictions.rdf']),
('cc/license/xml', ['license.rdf/xml/questions.xml'])],
zip_safe=False,
test_suite='nose.collector',
install_requires=[
'setuptools',
'zope.interface',
'nose',
'Genshi',
],
setup_requires=['setuptools-git',],
entry_points="""
# -*- Entry points: -*-
""",
)
| from setuptools import setup, find_packages
import sys, os
setup(name='cc.license',
version='0.01',
description="License selection based on ccREL-based metadata.",
classifiers=[],
keywords='',
author='Creative Commons',
author_email='software@creativecommons.org',
url='http://wiki.creativecommons.org/CcLicense',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
#package_data={'cc.license': ['*.xml', '*.txt']}, # doesn't work
data_files=[('cc/license/rdf', ['license.rdf/rdf/index.rdf',
'license.rdf/rdf/selectors.rdf',
'license.rdf/rdf/jurisdictions.rdf']),
('cc/license/xml', ['license.rdf/xml/questions.xml'])],
zip_safe=False,
test_suite='nose.collector',
install_requires=[
'setuptools',
'zope.interface',
'nose',
'Genshi',
'pylons', # XXX why does nose throw a RuntimeWarning without this?
],
setup_requires=['setuptools-git',],
entry_points="""
# -*- Entry points: -*-
[nose.plugins]
pylons = pylons.test:PylonsPlugin
""",
)
| Solve RuntimeWarning that arose without knowing why. | Solve RuntimeWarning that arose without knowing why.
| Python | mit | creativecommons/cc.license,creativecommons/cc.license | ---
+++
@@ -24,9 +24,12 @@
'zope.interface',
'nose',
'Genshi',
+ 'pylons', # XXX why does nose throw a RuntimeWarning without this?
],
setup_requires=['setuptools-git',],
entry_points="""
# -*- Entry points: -*-
+ [nose.plugins]
+ pylons = pylons.test:PylonsPlugin
""",
) |
8684e06ed7056171d86ccdb5943bdf28c518c589 | setup.py | setup.py | from setuptools import setup
setup(
name='jobcli',
version='0.1.a1',
py_modules=['jobcli'],
install_requires=['click', 'requests',],
entry_points={'console_scripts':['jobcli=jobcli:cli',]},
url='https://www.jobcli.com',
author='Stephan Goergen',
author_email='stephan.goergen@gmail.com',
description='Job Search from the Command Line',
license='MIT',
zip_safe=False,
include_package_data=False,
keywords='board job search command line career developer engineer',
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Operating System :: OS Independent'
'Natural Language :: English',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'Topic :: Office/Business',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| from setuptools import setup
setup(
name='jobcli',
version='0.1.a1',
py_modules=['jobcli'],
install_requires=['click', 'requests',],
entry_points={'console_scripts':['jobcli=jobcli:cli',]},
url='https://www.jobcli.com',
author='Stephan Goergen',
author_email='stephan.goergen@gmail.com',
description='Job Search from the Command Line',
license='MIT',
zip_safe=False,
include_package_data=False,
keywords='board job search command line career developer engineer',
classifiers=[
'License :: OSI Approved :: MIT License'
,'Development Status :: 3 - Alpha'
,'Environment :: Console'
,'Operating System :: OS Independent'
,'Natural Language :: English'
,'Intended Audience :: Developers'
,'Intended Audience :: Information Technology'
,'Intended Audience :: System Administrators'
,'Intended Audience :: Science/Research'
,'Topic :: Office/Business'
,'Programming Language :: Python :: 2'
,'Programming Language :: Python :: 2.7'
,'Programming Language :: Python :: 3'
,'Programming Language :: Python :: 3.3'
,'Programming Language :: Python :: 3.4'
,'Programming Language :: Python :: 3.5'
]
)
| Add missing comma in classifiers. | Add missing comma in classifiers.
| Python | mit | jobcli/jobcli-app,jobcli/jobcli-app | ---
+++
@@ -15,22 +15,22 @@
include_package_data=False,
keywords='board job search command line career developer engineer',
classifiers=[
- 'License :: OSI Approved :: MIT License',
- 'Development Status :: 3 - Alpha',
- 'Environment :: Console',
- 'Operating System :: OS Independent'
- 'Natural Language :: English',
- 'Intended Audience :: Developers',
- 'Intended Audience :: Information Technology',
- 'Intended Audience :: System Administrators',
- 'Intended Audience :: Science/Research',
- 'Topic :: Office/Business',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.3',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
- ],
+ 'License :: OSI Approved :: MIT License'
+ ,'Development Status :: 3 - Alpha'
+ ,'Environment :: Console'
+ ,'Operating System :: OS Independent'
+ ,'Natural Language :: English'
+ ,'Intended Audience :: Developers'
+ ,'Intended Audience :: Information Technology'
+ ,'Intended Audience :: System Administrators'
+ ,'Intended Audience :: Science/Research'
+ ,'Topic :: Office/Business'
+ ,'Programming Language :: Python :: 2'
+ ,'Programming Language :: Python :: 2.7'
+ ,'Programming Language :: Python :: 3'
+ ,'Programming Language :: Python :: 3.3'
+ ,'Programming Language :: Python :: 3.4'
+ ,'Programming Language :: Python :: 3.5'
+ ]
)
|
c1eafa32f9fafa859a0fcaf047f4a80b9bc52969 | setup.py | setup.py | #!/usr/bin/env python
import os
from glob import glob
from distutils.core import setup
setup(
name='whisper',
version='0.9.10',
url='https://launchpad.net/graphite',
author='Chris Davis',
author_email='chrismd@gmail.com',
license='Apache Software License 2.0',
description='Fixed size round-robin style database',
py_modules=['whisper'],
scripts=glob('bin/*'),
)
| #!/usr/bin/env python
import os
from glob import glob
from distutils.core import setup
setup(
name='whisper',
version='0.9.10',
url='https://launchpad.net/graphite',
author='Chris Davis',
author_email='chrismd@gmail.com',
license='Apache Software License 2.0',
description='Fixed size round-robin style database',
py_modules=['whisper'],
scripts=glob('bin/*'),
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
)
| Add PyPI classifiers for python versions | Add PyPI classifiers for python versions
| Python | apache-2.0 | penpen/whisper,deniszh/whisper,graphite-server/whisper,alexandreboisvert/whisper,akbooer/whisper,graphite-project/whisper,cbowman0/whisper,piotr1212/whisper,kerlandsson/whisper,obfuscurity/whisper | ---
+++
@@ -15,4 +15,12 @@
description='Fixed size round-robin style database',
py_modules=['whisper'],
scripts=glob('bin/*'),
+ classifiers=[
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.2',
+ 'Programming Language :: Python :: 3.3',
+ ],
) |
b99fa752feceded0ff6ea1e46e1966ceb76abff2 | setup.py | setup.py | import os
from setuptools import find_packages
from setuptools import setup
cur_dir = os.path.dirname(__file__)
readme = os.path.join(cur_dir, 'README.md')
if os.path.exists(readme):
with open(readme) as fh:
long_description = fh.read()
else:
long_description = ''
setup(
name='walrus',
version=__import__('walrus').__version__,
description='walrus',
long_description=long_description,
author='Charles Leifer',
author_email='coleifer@gmail.com',
url='http://github.com/coleifer/walrus/',
install_requires=['redis'],
packages=find_packages(),
package_data={
'walrus': [
'scripts/*',
'stopwords.txt',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
test_suite='walrus.tests',
)
| import os
from setuptools import find_packages
from setuptools import setup
cur_dir = os.path.dirname(__file__)
readme = os.path.join(cur_dir, 'README.md')
if os.path.exists(readme):
with open(readme) as fh:
long_description = fh.read()
else:
long_description = ''
setup(
name='walrus',
version=__import__('walrus').__version__,
description='walrus',
long_description=long_description,
author='Charles Leifer',
author_email='coleifer@gmail.com',
url='http://github.com/coleifer/walrus/',
install_requires=['redis'],
packages=find_packages(),
package_data={
'walrus': [
'scripts/*',
'stopwords.txt',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='walrus.tests',
)
| Add Python specific Trove classifiers. | Add Python specific Trove classifiers.
| Python | mit | coleifer/walrus | ---
+++
@@ -34,6 +34,12 @@
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
],
test_suite='walrus.tests',
) |
f891dbeca1b9c3cd23bcb1b70ebad149a8f57d4b | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='gears-coffeescript',
version='0.1.dev',
url='https://github.com/gears/gears',
license='ISC',
author='Mike Yumatov',
author_email='mike@yumatov.org',
description='CoffeeScript compiler for Gears',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| import os
from setuptools import setup, find_packages
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name='gears-coffeescript',
version='0.1.dev',
url='https://github.com/gears/gears',
license='ISC',
author='Mike Yumatov',
author_email='mike@yumatov.org',
description='CoffeeScript compiler for Gears',
long_description=read('README.rst'),
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| Read package long description from readme | Read package long description from readme
| Python | isc | gears/gears-coffeescript,gears/gears-coffeescript | ---
+++
@@ -1,4 +1,9 @@
+import os
from setuptools import setup, find_packages
+
+
+def read(filename):
+ return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
@@ -9,6 +14,7 @@
author='Mike Yumatov',
author_email='mike@yumatov.org',
description='CoffeeScript compiler for Gears',
+ long_description=read('README.rst'),
packages=find_packages(),
include_package_data=True,
classifiers=[ |
56cd2b9804718caeb8728c3b01fb6f0bc0f2d0d4 | setup.py | setup.py | # -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='7.0.8',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
| # -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='7.0.9',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
| Update the PyPI version to 7.0.9. | Update the PyPI version to 7.0.9.
| Python | mit | Doist/todoist-python | ---
+++
@@ -10,7 +10,7 @@
setup(
name='todoist-python',
- version='7.0.8',
+ version='7.0.9',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com', |
1f5183c345444f35891927014d92510b66b96f5b | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
setup(
name='robber',
version='1.0.0',
description='BDD / TDD assertion library for Python',
long_description=long_description,
author='Tao Liang',
author_email='tao@synapse-ai.com',
url='https://github.com/vesln/robber.py',
packages=[
'robber',
'robber.matchers',
],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing'
],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='robber',
version='1.0.1',
description='BDD / TDD assertion library for Python',
author='Tao Liang',
author_email='tao@synapse-ai.com',
url='https://github.com/vesln/robber.py',
packages=[
'robber',
'robber.matchers',
],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing'
],
)
| Remove the dependency on pypandoc. It's unfortunate but we don't want to add dependency just for readability on pypi. | Remove the dependency on pypandoc. It's unfortunate but we don't want to add dependency just for readability on pypi.
| Python | mit | vesln/robber.py,taoenator/robber.py | ---
+++
@@ -1,15 +1,11 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
-import pypandoc
-
-long_description = pypandoc.convert('README.md', 'rst')
setup(
name='robber',
- version='1.0.0',
+ version='1.0.1',
description='BDD / TDD assertion library for Python',
- long_description=long_description,
author='Tao Liang',
author_email='tao@synapse-ai.com',
url='https://github.com/vesln/robber.py', |
efaeb8fe4458cd6b214896ad72f3d41c25c23313 | misc/python/botan/__init__.py | misc/python/botan/__init__.py | from _botan import *
init = LibraryInitializer()
class SymmetricKey(OctetString):
pass
class InitializationVector(OctetString):
pass
def Filter(name, key = None, iv = None, dir = None):
if key != None and iv != None and dir != None:
return make_filter(name, key, iv, dir)
elif key != None and dir != None:
return make_filter(name, key, dir)
elif key != None:
return make_filter(name, key)
else:
return make_filter(name)
def Pipe(*filters):
pipe = PipeObj()
for filter in filters:
if filter:
pipe.append(filter)
return pipe
#def Filter(name, key):
# return make_filter(name, key)
| from _botan import *
# Initialize the library when the module is imported
init = LibraryInitializer()
class SymmetricKey(OctetString):
pass
class InitializationVector(OctetString):
pass
def Filter(name, key = None, iv = None, dir = None):
if key != None and iv != None and dir != None:
return make_filter(name, key, iv, dir)
elif key != None and dir != None:
return make_filter(name, key, dir)
elif key != None:
return make_filter(name, key)
else:
return make_filter(name)
def Pipe(*filters):
pipe = PipeObj()
for filter in filters:
if filter:
pipe.append(filter)
return pipe
| Remove an old version of the Filter() wrapper function | Remove an old version of the Filter() wrapper function
| Python | bsd-2-clause | Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,Rohde-Schwarz-Cybersecurity/botan,randombit/botan,webmaster128/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,randombit/botan,webmaster128/botan,webmaster128/botan,Rohde-Schwarz-Cybersecurity/botan,randombit/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan | ---
+++
@@ -1,5 +1,6 @@
from _botan import *
+# Initialize the library when the module is imported
init = LibraryInitializer()
class SymmetricKey(OctetString):
@@ -24,6 +25,3 @@
if filter:
pipe.append(filter)
return pipe
-
-#def Filter(name, key):
-# return make_filter(name, key) |
22ea99b8183f11c3fc9f39810892256a2989ced5 | setup.py | setup.py | """
Flask-Twilio
-------------
Make Twilio voice/SMS calls with Flask
"""
from setuptools import setup
exec(open('flask_twilio.py').readline())
setup(
name='Flask-Twilio',
version=__version__,
url='http://example.com/flask-twilio/',
license='BSD',
author='Leo Singer',
author_email='leo.singer@ligo.org',
description='Make Twilio voice/SMS calls with Flask',
long_description=__doc__,
py_modules=['flask_twilio'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'itsdangerous',
'Flask',
'six',
'twilio'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Telephony'
]
) | """
Flask-Twilio
-------------
Make Twilio voice/SMS calls with Flask
"""
from setuptools import setup
exec(open('flask_twilio.py').readline())
setup(
name='Flask-Twilio',
version=__version__,
url='http://example.com/flask-twilio/',
license='BSD',
author='Leo Singer',
author_email='leo.singer@ligo.org',
description='Make Twilio voice/SMS calls with Flask',
long_description=__doc__,
py_modules=['flask_twilio'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'itsdangerous',
'Flask',
'six',
'twilio'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Telephony'
]
) | Add classifiers to document Python version support | Add classifiers to document Python version support
| Python | bsd-3-clause | lpsinger/flask-twilio | ---
+++
@@ -39,6 +39,10 @@
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Telephony' |
27a9d12b7547c7a14f30c2c06c52e311a960f3ac | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='gdcdatamodel',
packages=find_packages(),
install_requires=[
'pytz==2016.4',
'graphviz==0.4.2',
'jsonschema==2.5.1',
'psqlgraph',
'gdcdictionary',
'cdisutils',
'python-dateutil==2.4.2',
],
package_data={
"gdcdatamodel": [
"xml_mappings/*.yaml",
]
},
dependency_links=[
'git+https://github.com/NCI-GDC/cdisutils.git@4a75cc05c7ba2174e70cca9c9ea7e93947f7a868#egg=cdisutils',
'git+https://github.com/NCI-GDC/psqlgraph.git@7b5de7d56aa3159a9526940eb273579ddbf084ca#egg=psqlgraph',
'git+https://github.com/NCI-GDC/gdcdictionary.git@6d404dbd1dd45ed35d0aef6d33c43dbbd982930d#egg=gdcdictionary',
],
entry_points={
'console_scripts': [
'gdc_postgres_admin=gdcdatamodel.gdc_postgres_admin:main'
]
},
)
| from setuptools import setup, find_packages
setup(
name='gdcdatamodel',
packages=find_packages(),
install_requires=[
'pytz==2016.4',
'graphviz==0.4.2',
'jsonschema==2.5.1',
'psqlgraph',
'gdcdictionary',
'cdisutils',
'python-dateutil==2.4.2',
],
package_data={
"gdcdatamodel": [
"xml_mappings/*.yaml",
]
},
dependency_links=[
'git+https://github.com/NCI-GDC/cdisutils.git@4a75cc05c7ba2174e70cca9c9ea7e93947f7a868#egg=cdisutils',
'git+https://github.com/NCI-GDC/psqlgraph.git@7b5de7d56aa3159a9526940eb273579ddbf084ca#egg=psqlgraph',
'git+https://github.com/NCI-GDC/gdcdictionary.git@1.12#egg=gdcdictionary',
],
entry_points={
'console_scripts': [
'gdc_postgres_admin=gdcdatamodel.gdc_postgres_admin:main'
]
},
)
| Update pins for dictionary to release tag | chore(pins): Update pins for dictionary to release tag
- Update pins for dictionary to release tag
| Python | apache-2.0 | NCI-GDC/gdcdatamodel,NCI-GDC/gdcdatamodel | ---
+++
@@ -20,7 +20,7 @@
dependency_links=[
'git+https://github.com/NCI-GDC/cdisutils.git@4a75cc05c7ba2174e70cca9c9ea7e93947f7a868#egg=cdisutils',
'git+https://github.com/NCI-GDC/psqlgraph.git@7b5de7d56aa3159a9526940eb273579ddbf084ca#egg=psqlgraph',
- 'git+https://github.com/NCI-GDC/gdcdictionary.git@6d404dbd1dd45ed35d0aef6d33c43dbbd982930d#egg=gdcdictionary',
+ 'git+https://github.com/NCI-GDC/gdcdictionary.git@1.12#egg=gdcdictionary',
],
entry_points={
'console_scripts': [ |
203cb874331a5df40d297ed6143b75ddbf8cda1e | setup.py | setup.py | from setuptools import setup
import os
import imbox
version = imbox.__version__
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name='imbox',
version=version,
description="Python IMAP for Human beings",
long_description=read('README.rst'),
keywords='email, IMAP, parsing emails',
author='Martin Rusev',
author_email='martin@amon.cx',
url='https://github.com/martinrusev/imbox',
license='MIT',
packages=['imbox', 'imbox.vendors'],
package_dir={'imbox': 'imbox'},
zip_safe=False,
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
),
test_suite='tests',
)
| from setuptools import setup
import os
import imbox
version = imbox.__version__
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name='imbox',
version=version,
description="Python IMAP for Human beings",
long_description=read('README.rst'),
keywords='email, IMAP, parsing emails',
author='Martin Rusev',
author_email='martin@amon.cx',
url='https://github.com/martinrusev/imbox',
license='MIT',
packages=['imbox', 'imbox.vendors'],
package_dir={'imbox': 'imbox'},
install_requires=[
'chardet',
],
zip_safe=False,
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
),
test_suite='tests',
)
| Add chardet as a dependency | Add chardet as a dependency
| Python | mit | martinrusev/imbox | ---
+++
@@ -8,6 +8,7 @@
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
+
setup(
name='imbox',
@@ -21,6 +22,9 @@
license='MIT',
packages=['imbox', 'imbox.vendors'],
package_dir={'imbox': 'imbox'},
+ install_requires=[
+ 'chardet',
+ ],
zip_safe=False,
classifiers=(
'Programming Language :: Python', |
04e111ff36d1c2769bca74d4271cbaa996dd2bae | plugins/shutup_slackbot.py | plugins/shutup_slackbot.py | from rtmbot.core import Plugin
class ShutupSlackbot(Plugin):
def process_message(self, data):
if data["text"] in ["Bpye Berg Nyberg"]:
self.outputs.append([data["channel"], "Hold kjeft, slackbot..."])
| from rtmbot.core import Plugin
class ShutupSlackbotPlugin(Plugin):
def process_message(self, data):
if data["text"] in ["Bpye Berg Nyberg"]:
self.outputs.append([data["channel"], "Hold kjeft, slackbot..."])
| Fix mising Plugin in class name | Fix mising Plugin in class name
| Python | mit | RadioRevolt/SlackBot | ---
+++
@@ -1,7 +1,7 @@
from rtmbot.core import Plugin
-class ShutupSlackbot(Plugin):
+class ShutupSlackbotPlugin(Plugin):
def process_message(self, data):
if data["text"] in ["Bpye Berg Nyberg"]:
self.outputs.append([data["channel"], "Hold kjeft, slackbot..."]) |
3a37211f09c000f0fcb41ca076cb98b90bfae030 | eb_sqs/urls.py | eb_sqs/urls.py | from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from eb_sqs.views import process_task
app_name = 'eb_sqs'
urlpatterns = [
url(r'^process$', process_task, name='process_task'),
]
| from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from eb_sqs.views import process_task
urlpatterns = [
url(r'^process$', process_task, name='process_task'),
]
| Remove unnecessary global variable assignment | Remove unnecessary global variable assignment
| Python | mit | cuda-networks/django-eb-sqs,cuda-networks/django-eb-sqs | ---
+++
@@ -4,7 +4,6 @@
from eb_sqs.views import process_task
-app_name = 'eb_sqs'
urlpatterns = [
url(r'^process$', process_task, name='process_task'),
] |
5edc89f4ba516ba497f2b171bba865487116bbe0 | camoco/__init__.py | camoco/__init__.py | """
Camoco Library - CoAnalysis of Molecular Components
CacheMoneyCorn
"""
__license__ = """
Creative Commons Non-Commercial 4.0 Generic
http://creativecommons.org/licenses/by-nc/4.0/
"""
__version__ = '0.3.0'
import sys
import os
import numpy
import pyximport
pyximport.install(setup_args={
"include_dirs":numpy.get_include()
})
import matplotlib
matplotlib.use('Agg')
from .Config import cf
from .Camoco import Camoco
from .Expr import Expr
from .COB import COB
from .RefGen import RefGen
from .RefGenDist import *
from .PCCUP import *
from .Ontology import Ontology,Term
from .GWAS import GWAS
from .HapMap import HapMap
from .Locus import Locus
from .Tools import available_datasets,del_dataset
from .Tools import mv_dataset,redescribe_dataset
from .GEO import Family
from .GOnt import GOnt
from .Annotation import GWASData
# Create yourself
Camoco.create('Camoco','Mother Database')
| """
Camoco Library - CoAnalysis of Molecular Components
CacheMoneyCorn
"""
__license__ = """
Creative Commons Non-Commercial 4.0 Generic
http://creativecommons.org/licenses/by-nc/4.0/
"""
__version__ = '0.3.0-dev'
import sys
import os
import numpy
import pyximport
pyximport.install(setup_args={
"include_dirs":numpy.get_include()
})
import matplotlib
matplotlib.use('Agg')
from .Config import cf
from .Camoco import Camoco
from .Expr import Expr
from .COB import COB
from .RefGen import RefGen
from .RefGenDist import *
from .PCCUP import *
from .Ontology import Ontology,Term
from .GWAS import GWAS
from .HapMap import HapMap
from .Locus import Locus
from .Tools import available_datasets,del_dataset
from .Tools import mv_dataset,redescribe_dataset
from .GEO import Family
from .GOnt import GOnt
from .Annotation import GWASData
# Create yourself
Camoco.create('Camoco','Mother Database')
| Add dev option to version string | Add dev option to version string
| Python | mit | schae234/Camoco,schae234/Camoco | ---
+++
@@ -11,7 +11,7 @@
http://creativecommons.org/licenses/by-nc/4.0/
"""
-__version__ = '0.3.0'
+__version__ = '0.3.0-dev'
import sys
import os |
1270c31dcf35c17a26a282605d2e04ffd2e8d985 | tests/test_ftp.py | tests/test_ftp.py | from wex.url import URL
expected_lines = [
b"FTP/1.0 200 OK\r\n",
b"X-wex-url: ftp://anonymous:me@ftp.kernel.org/pub/site/README\r\n",
b"\r\n",
b"This directory contains files related to the operation of the\n",
]
expected_content = b''.join(expected_lines)
url = 'ftp://anonymous:me@ftp.kernel.org/pub/site/README'
def test_ftp_read():
readables = list(URL(url).get())
assert len(readables) == 1
r0 = readables[0]
chunk = r0.read(2**16)
content = chunk
chunk = r0.read(2**16)
assert not chunk
assert content.startswith(expected_content)
def test_ftp_readline():
readables = list(URL(url).get())
assert len(readables) == 1
r0 = readables[0]
first_four_lines = [r0.readline() for i in range(4)]
assert first_four_lines == expected_lines[:4]
| from wex.url import URL
url = 'ftp://anonymous:me@speedtest.tele2.net/1KB.zip'
expected_lines = [
b"FTP/1.0 200 OK\r\n",
b"X-wex-url: " + url + "\r\n",
b"\r\n",
]
expected_content = b''.join(expected_lines)
def test_ftp_read():
readables = list(URL(url).get())
assert len(readables) == 1
r0 = readables[0]
chunk = r0.read(2**16)
content = chunk
chunk = r0.read(2**16)
assert not chunk
assert content.startswith(expected_content)
def test_ftp_readline():
readables = list(URL(url).get())
assert len(readables) == 1
n = 3
r0 = readables[0]
first_few_lines = [r0.readline() for i in range(n)]
assert first_few_lines == expected_lines[:n]
| Switch ftp server now ftp.kernel.org closed | Switch ftp server now ftp.kernel.org closed
| Python | bsd-3-clause | eBay/wextracto,gilessbrown/wextracto,gilessbrown/wextracto,eBay/wextracto | ---
+++
@@ -1,14 +1,13 @@
from wex.url import URL
+
+url = 'ftp://anonymous:me@speedtest.tele2.net/1KB.zip'
expected_lines = [
b"FTP/1.0 200 OK\r\n",
- b"X-wex-url: ftp://anonymous:me@ftp.kernel.org/pub/site/README\r\n",
+ b"X-wex-url: " + url + "\r\n",
b"\r\n",
- b"This directory contains files related to the operation of the\n",
]
expected_content = b''.join(expected_lines)
-
-url = 'ftp://anonymous:me@ftp.kernel.org/pub/site/README'
def test_ftp_read():
@@ -25,6 +24,7 @@
def test_ftp_readline():
readables = list(URL(url).get())
assert len(readables) == 1
+ n = 3
r0 = readables[0]
- first_four_lines = [r0.readline() for i in range(4)]
- assert first_four_lines == expected_lines[:4]
+ first_few_lines = [r0.readline() for i in range(n)]
+ assert first_few_lines == expected_lines[:n] |
749c1254a240053d2ca2abb1cc88d5eb7ae284eb | tests/test_tcv.py | tests/test_tcv.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test for the tcv module
"""
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test for the tcv module
"""
def test_placeholder():
""" Write real tests... """
assert True
| Add an empty placeholder tests. | Add an empty placeholder tests.
| Python | isc | wagdav/tcvpy | ---
+++
@@ -4,3 +4,8 @@
"""
Test for the tcv module
"""
+
+
+def test_placeholder():
+ """ Write real tests... """
+ assert True |
d1c1d257a3fb54c6acef4c66c8ed639f48a0c426 | bibliopixel/control/artnet.py | bibliopixel/control/artnet.py | import collections, copy, queue
from . import control
from .. util import artnet_message, log, server_cache, udp
QUEUE_TIMEOUT = 0.1
class ArtNet(control.ExtractedControl):
def __init__(self, *args, ip_address, port=artnet_message.UDP_PORT, **kwds):
super().__init__(*args, **kwds)
self.address = ip_address, port
def _convert(self, msg):
msg = artnet_message.bytes_to_message(msg)
assert any(i for i in msg.data)
msg = collections.OrderedDict((
('type', 'dmx'),
('net', msg.net),
('subUni', msg.subUni),
('data', msg.data)))
return super()._convert(msg)
def _make_thread(self):
return udp.Receiver(self.address, receive=self.receive)
| import collections, copy, queue
from . import control
from .. util import artnet_message, log, server_cache, udp
QUEUE_TIMEOUT = 0.1
class ArtNet(control.ExtractedControl):
def __init__(self, *args, ip_address, port=artnet_message.UDP_PORT, **kwds):
super().__init__(*args, **kwds)
self.address = ip_address, port
def _convert(self, msg):
msg = artnet_message.bytes_to_message(msg)
msg = collections.OrderedDict((
('type', 'dmx'),
('net', msg.net),
('subUni', msg.subUni),
('data', msg.data)))
return super()._convert(msg)
def _make_thread(self):
return udp.Receiver(self.address, receive=self.receive)
| Remove test that crashes ArtNet receiver | Remove test that crashes ArtNet receiver
| Python | mit | rec/BiblioPixel,ManiacalLabs/BiblioPixel,rec/BiblioPixel,ManiacalLabs/BiblioPixel,rec/BiblioPixel,ManiacalLabs/BiblioPixel,rec/BiblioPixel,ManiacalLabs/BiblioPixel | ---
+++
@@ -12,7 +12,6 @@
def _convert(self, msg):
msg = artnet_message.bytes_to_message(msg)
- assert any(i for i in msg.data)
msg = collections.OrderedDict((
('type', 'dmx'),
('net', msg.net), |
5cd66754dc4a53bf9e0631733541c2ed5e4be06e | pvextractor/tests/test_gui.py | pvextractor/tests/test_gui.py | import pytest
from distutils.version import LooseVersion
import matplotlib as mpl
from ..gui import PVSlicer
from .test_slicer import make_test_hdu
try:
import PyQt5
PYQT5OK = True
except ImportError:
PYQT5OK = False
if LooseVersion(mpl.__version__) < LooseVersion('2'):
MPLOK = True
else:
MPLOK = False
@pytest.mark.skipif('not PYQT5OK or not MPLOK')
def test_gui():
hdu = make_test_hdu()
pv = PVSlicer(hdu, clim=(-0.02, 2))
pv.show(block=False)
x = [100, 200, 220, 330, 340]
y = [100, 200, 300, 420, 430]
for i in range(len(x)):
pv.fig.canvas.motion_notify_event(x[i], y[i])
pv.fig.canvas.button_press_event(x[i], y[i], 1)
pv.fig.canvas.key_press_event('enter')
pv.fig.canvas.motion_notify_event(310, 420)
pv.fig.canvas.button_press_event(410, 420, 1)
pv.fig.canvas.draw()
assert pv.pv_slice.data.shape == (5, 2)
| import numpy as np
from ..gui import PVSlicer
from .test_slicer import make_test_hdu
def test_gui():
hdu = make_test_hdu()
pv = PVSlicer(hdu, clim=(-0.02, 2))
pv.show(block=False)
xy_data = np.array([[0.0, 0.1, 0.5, 1.0, 0.5],
[0.0, 0.3, 0.4, 0.9, 1.4]]).T
x, y = pv.ax1.transData.transform(xy_data).T
for i in range(len(x)):
pv.fig.canvas.motion_notify_event(x[i], y[i])
pv.fig.canvas.button_press_event(x[i], y[i], 1)
pv.fig.canvas.key_press_event('enter')
pv.fig.canvas.motion_notify_event(x[-1] - 20, y[-1])
pv.fig.canvas.button_press_event(x[-1] - 20, y[-1], 1)
pv.fig.canvas.draw()
assert pv.pv_slice.data.shape == (5, 2)
| Make GUI test more robust to HiDPI displays | Make GUI test more robust to HiDPI displays | Python | bsd-3-clause | radio-astro-tools/pvextractor,keflavich/pvextractor | ---
+++
@@ -1,42 +1,28 @@
-import pytest
-from distutils.version import LooseVersion
-import matplotlib as mpl
-
-
+import numpy as np
from ..gui import PVSlicer
from .test_slicer import make_test_hdu
-try:
- import PyQt5
- PYQT5OK = True
-except ImportError:
- PYQT5OK = False
+def test_gui():
-if LooseVersion(mpl.__version__) < LooseVersion('2'):
- MPLOK = True
-else:
- MPLOK = False
-
-
-@pytest.mark.skipif('not PYQT5OK or not MPLOK')
-def test_gui():
hdu = make_test_hdu()
pv = PVSlicer(hdu, clim=(-0.02, 2))
pv.show(block=False)
- x = [100, 200, 220, 330, 340]
- y = [100, 200, 300, 420, 430]
+ xy_data = np.array([[0.0, 0.1, 0.5, 1.0, 0.5],
+ [0.0, 0.3, 0.4, 0.9, 1.4]]).T
+
+ x, y = pv.ax1.transData.transform(xy_data).T
for i in range(len(x)):
pv.fig.canvas.motion_notify_event(x[i], y[i])
pv.fig.canvas.button_press_event(x[i], y[i], 1)
pv.fig.canvas.key_press_event('enter')
- pv.fig.canvas.motion_notify_event(310, 420)
- pv.fig.canvas.button_press_event(410, 420, 1)
+ pv.fig.canvas.motion_notify_event(x[-1] - 20, y[-1])
+ pv.fig.canvas.button_press_event(x[-1] - 20, y[-1], 1)
pv.fig.canvas.draw()
|
72a5d74496d8cfd3d2216ac9b5f5ef2d4b054ed0 | backend/loader/model/datafile.py | backend/loader/model/datafile.py | from dataitem import DataItem
class DataFile(DataItem):
def __init__(self, name, access, owner):
super(DataFile, self).__init__(name, access, owner, "datafile")
self.checksum = ""
self.size = 0
self.location = ""
self.mediatype = ""
self.conditions = []
self.text = ""
self.metatags = []
self.datadirs = []
| from dataitem import DataItem
class DataFile(DataItem):
def __init__(self, name, access, owner):
super(DataFile, self).__init__(name, access, owner, "datafile")
self.checksum = ""
self.size = 0
self.location = ""
self.mediatype = ""
self.conditions = []
self.text = ""
self.metatags = []
self.datadirs = []
self.parent = ""
| Add parent so we can track versions. | Add parent so we can track versions.
| Python | mit | materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org | ---
+++
@@ -11,3 +11,4 @@
self.text = ""
self.metatags = []
self.datadirs = []
+ self.parent = "" |
c37f7705856bd82f5416cef4bb82e1786c3f508c | tests/__init__.py | tests/__init__.py | # Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
log = logging.getLogger()
log.setLevel('DEBUG')
| # Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
log = logging.getLogger()
log.setLevel('DEBUG')
# if nose didn't already attach a log handler, add one here
if not log.handlers:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s [%(module)s:%(lineno)s]: %(message)s'))
log.addHandler(handler)
| Add test logger if not added by nose | Add test logger if not added by nose
| Python | apache-2.0 | thobbs/python-driver,mobify/python-driver,yi719/python-driver,datastax/python-driver,mobify/python-driver,markflorisson/python-driver,mambocab/python-driver,sontek/python-driver,bbirand/python-driver,mike-tr-adamson/python-driver,kracekumar/python-driver,beobal/python-driver,mike-tr-adamson/python-driver,yi719/python-driver,thelastpickle/python-driver,vipjml/python-driver,jregovic/python-driver,coldeasy/python-driver,thelastpickle/python-driver,tempbottle/python-driver,beobal/python-driver,sontek/python-driver,HackerEarth/cassandra-python-driver,HackerEarth/cassandra-python-driver,vipjml/python-driver,coldeasy/python-driver,kishkaru/python-driver,jfelectron/python-driver,markflorisson/python-driver,stef1927/python-driver,jfelectron/python-driver,datastax/python-driver,stef1927/python-driver,jregovic/python-driver,kracekumar/python-driver,bbirand/python-driver,thobbs/python-driver,mambocab/python-driver,tempbottle/python-driver,kishkaru/python-driver | ---
+++
@@ -16,3 +16,8 @@
log = logging.getLogger()
log.setLevel('DEBUG')
+# if nose didn't already attach a log handler, add one here
+if not log.handlers:
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s [%(module)s:%(lineno)s]: %(message)s'))
+ log.addHandler(handler) |
90d933a957509da4189b8f793e27ac563b73264b | reddit_adzerk/adzerkads.py | reddit_adzerk/adzerkads.py | from pylons import c, g
from r2.lib.pages import Ads as BaseAds
class Ads(BaseAds):
def __init__(self):
BaseAds.__init__(self)
adzerk_test_srs = g.live_config.get("adzerk_test_srs")
if adzerk_test_srs and c.site.name in adzerk_test_srs:
if c.secure:
self.ad_url = g.config["adzerk_https_url"].format(
origin=g.https_endpoint)
else:
self.ad_url = g.config["adzerk_url"].format(
origin=g.origin)
self.frame_id = "ad_main"
| from pylons import c, g
from r2.lib.pages import Ads as BaseAds
class Ads(BaseAds):
def __init__(self):
BaseAds.__init__(self)
adzerk_test_srs = g.live_config.get("adzerk_test_srs")
if adzerk_test_srs and c.site.name in adzerk_test_srs:
url_key = "adzerk_https_url" if c.secure else "adzerk_url"
self.ad_url = g.config[url_key].format(
origin=c.request_origin,
)
self.frame_id = "ad_main"
| Use c.request_origin for frame origin. | Use c.request_origin for frame origin.
| Python | bsd-3-clause | madbook/reddit-plugin-adzerk,madbook/reddit-plugin-adzerk,madbook/reddit-plugin-adzerk | ---
+++
@@ -8,10 +8,8 @@
BaseAds.__init__(self)
adzerk_test_srs = g.live_config.get("adzerk_test_srs")
if adzerk_test_srs and c.site.name in adzerk_test_srs:
- if c.secure:
- self.ad_url = g.config["adzerk_https_url"].format(
- origin=g.https_endpoint)
- else:
- self.ad_url = g.config["adzerk_url"].format(
- origin=g.origin)
+ url_key = "adzerk_https_url" if c.secure else "adzerk_url"
+ self.ad_url = g.config[url_key].format(
+ origin=c.request_origin,
+ )
self.frame_id = "ad_main" |
7aa1875f9e542ae539729730cf9457e78d8b775b | walker/main.py | walker/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import sys
def execute_command(target):
os.chdir(target)
command = sys.argv[1:]
try:
subprocess.check_call(command)
except Exception as e:
print "ERROR in %s: %s" % (target, e)
def main():
base = os.getcwdu() + "/"
targets = None
# Get immediate child directories.
for root, dirs, files in os.walk('.'):
targets = dirs
break # dirty hack so we only get the first level
# Traverse through the directories.
for target in sorted(targets, key=lambda s: s.lower()):
target_full_path = base + target
print "walker: in %s" % target_full_path
execute_command(target_full_path)
os.chdir(base)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import sys
def execute_command(target):
os.chdir(target)
command = sys.argv[1:]
try:
subprocess.check_call(command)
except Exception as e:
print "ERROR in %s: %s" % (target, e)
def main():
base = os.getcwdu() + "/"
targets = None
# Get immediate child directories.
for root, dirs, files in os.walk('.'):
targets = dirs
break # dirty hack so we only get the first level
# Traverse through the directories.
for target in sorted(targets, key=lambda s: s.lower()):
target_full_path = base + target
print "\nwalker: in %s" % target_full_path
execute_command(target_full_path)
os.chdir(base)
if __name__ == '__main__':
main()
| Add a new line before displaying the current dir | Add a new line before displaying the current dir
This makes it easier for my brain to read.
| Python | bsd-3-clause | darylyu/walker,darylyu/walker | ---
+++
@@ -24,7 +24,7 @@
# Traverse through the directories.
for target in sorted(targets, key=lambda s: s.lower()):
target_full_path = base + target
- print "walker: in %s" % target_full_path
+ print "\nwalker: in %s" % target_full_path
execute_command(target_full_path)
os.chdir(base)
|
059b5db0768048d4e11fc012e1720213ba365538 | disco_aws_automation/disco_logging.py | disco_aws_automation/disco_logging.py | '''Utility function for logging'''
import logging
import sys
def configure_logging(debug, silent=False):
'''Sets the default logger and the boto logger to appropriate levels of chattiness.'''
logger = logging.getLogger('')
boto_logger = logging.getLogger('boto')
botocore_logger = logging.getLogger('botocore')
if silent and debug:
raise Exception('Debug and silent logging options are mutually exclusive')
if silent:
logging.disable(logging.CRITICAL)
elif debug:
logger.setLevel(logging.DEBUG)
boto_logger.setLevel(logging.INFO)
botocore_logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
boto_logger.setLevel(logging.CRITICAL)
botocore_logger.setLevel(logging.CRITICAL)
# If there are any handlers on the root logger, remove them so that if this function is called more
# than once, we don't get the same statement logged multiple times.
for handler in logger.handlers:
logger.removeHandler(handler)
stream_handler = logging.StreamHandler(sys.__stdout__)
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
logger.addHandler(stream_handler)
| '''Utility function for logging'''
import logging
import logging.handlers as handlers
import sys
def configure_logging(debug, silent=False):
'''Sets the default logger and the boto logger to appropriate levels of chattiness.'''
logger = logging.getLogger('')
boto_logger = logging.getLogger('boto')
botocore_logger = logging.getLogger('botocore')
# If there are any handlers on the root logger, remove them so that if this function is called more
# than once, we don't get the same statement logged multiple times.
for handler in logger.handlers:
logger.removeHandler(handler)
if silent and debug:
raise Exception('Debug and silent logging options are mutually exclusive')
if silent:
logging.disable(logging.CRITICAL)
elif debug:
logger.setLevel(logging.DEBUG)
boto_logger.setLevel(logging.INFO)
botocore_logger.setLevel(logging.DEBUG)
file_handler = handlers.RotatingFileHandler('debug.log', maxBytes=1048576, backupCount=1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
logger.addHandler(file_handler)
boto_logger.addHandler(file_handler)
botocore_logger.addHandler(file_handler)
else:
logger.setLevel(logging.INFO)
boto_logger.setLevel(logging.CRITICAL)
botocore_logger.setLevel(logging.CRITICAL)
stream_handler = logging.StreamHandler(sys.__stdout__)
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
logger.addHandler(stream_handler)
| Add file handler for debug logs | Add file handler for debug logs
| Python | bsd-2-clause | amplifylitco/asiaq,amplifylitco/asiaq,amplifylitco/asiaq | ---
+++
@@ -1,5 +1,6 @@
'''Utility function for logging'''
import logging
+import logging.handlers as handlers
import sys
@@ -8,6 +9,11 @@
logger = logging.getLogger('')
boto_logger = logging.getLogger('boto')
botocore_logger = logging.getLogger('botocore')
+
+ # If there are any handlers on the root logger, remove them so that if this function is called more
+ # than once, we don't get the same statement logged multiple times.
+ for handler in logger.handlers:
+ logger.removeHandler(handler)
if silent and debug:
raise Exception('Debug and silent logging options are mutually exclusive')
@@ -18,17 +24,18 @@
logger.setLevel(logging.DEBUG)
boto_logger.setLevel(logging.INFO)
botocore_logger.setLevel(logging.DEBUG)
+ file_handler = handlers.RotatingFileHandler('debug.log', maxBytes=1048576, backupCount=1)
+ file_handler.setLevel(logging.DEBUG)
+ file_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
+ logger.addHandler(file_handler)
+ boto_logger.addHandler(file_handler)
+ botocore_logger.addHandler(file_handler)
else:
logger.setLevel(logging.INFO)
boto_logger.setLevel(logging.CRITICAL)
botocore_logger.setLevel(logging.CRITICAL)
- # If there are any handlers on the root logger, remove them so that if this function is called more
- # than once, we don't get the same statement logged multiple times.
- for handler in logger.handlers:
- logger.removeHandler(handler)
-
stream_handler = logging.StreamHandler(sys.__stdout__)
- stream_handler.setLevel(logging.DEBUG)
+ stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
logger.addHandler(stream_handler) |
d1c5e7971814d5aeedec11bb7610680a4c5a0000 | text_processor.py | text_processor.py | from urllib.request import urlopen
def fetch_words():
with urlopen('http://sixty-north.com/c/t.txt') as story:
story_words = []
for line in story:
line_words = line.decode('utf-8').split()
for word in line_words:
story_words.append(word)
return story_words
def print_items(story_words):
word_list = ''
word_cursor = 0
print("Word Count", len(story_words))
while word_cursor < len(story_words):
paragraphCursor = 0
while paragraphCursor < 6:
if (word_cursor + paragraphCursor) == len(story_words):
break
word_list += str(story_words[word_cursor + paragraphCursor])
word_list += ' '
paragraphCursor += 1
word_cursor += paragraphCursor
word_list += '\n'
print(word_list)
def main():
print_items(fetch_words())
if __name__ == '__main__':
main()
| """Retrieve and print words from a URL.
Usage:
python3 text_processor.py <URL>
"""
import sys
from urllib.request import urlopen
def fetch_words(url):
"""Fetch a list of words from a URL.
Args:
url: The URL of a UTF-8 text document.
Returns:
A UTF-8-decoded list of strings containing the words from the document.
"""
with urlopen(url) as story:
story_words = []
for line in story:
line_words = line.decode('utf-8').split()
for word in line_words:
story_words.append(word)
return story_words
def print_items(items):
"""Print items six per line with a space in between each item
Args:
items: an iterable series that can be parsed as a string
"""
word_list = ''
word_cursor = 0
print("Word Count", len(items))
while word_cursor < len(items):
paragraphCursor = 0
while paragraphCursor < 6:
if (word_cursor + paragraphCursor) == len(items):
break
word_list += str(items[word_cursor + paragraphCursor])
word_list += ' '
paragraphCursor += 1
word_cursor += paragraphCursor
word_list += '\n'
print(word_list)
def main(url):
"""Print each word from a text document from a URL.
Args:
url: The URL to a UTF-8 text document
"""
print_items(fetch_words(url))
if __name__ == '__main__':
main(sys.argv[1])
| Document module with docstrings (Google format) | Document module with docstrings (Google format)
| Python | mit | kentoj/python-fundamentals | ---
+++
@@ -1,8 +1,25 @@
+"""Retrieve and print words from a URL.
+
+Usage:
+
+ python3 text_processor.py <URL>
+"""
+
+import sys
from urllib.request import urlopen
-def fetch_words():
- with urlopen('http://sixty-north.com/c/t.txt') as story:
+def fetch_words(url):
+ """Fetch a list of words from a URL.
+
+ Args:
+ url: The URL of a UTF-8 text document.
+
+ Returns:
+ A UTF-8-decoded list of strings containing the words from the document.
+
+ """
+ with urlopen(url) as story:
story_words = []
for line in story:
line_words = line.decode('utf-8').split()
@@ -11,16 +28,21 @@
return story_words
-def print_items(story_words):
+def print_items(items):
+ """Print items six per line with a space in between each item
+
+ Args:
+ items: an iterable series that can be parsed as a string
+ """
word_list = ''
word_cursor = 0
- print("Word Count", len(story_words))
- while word_cursor < len(story_words):
+ print("Word Count", len(items))
+ while word_cursor < len(items):
paragraphCursor = 0
while paragraphCursor < 6:
- if (word_cursor + paragraphCursor) == len(story_words):
+ if (word_cursor + paragraphCursor) == len(items):
break
- word_list += str(story_words[word_cursor + paragraphCursor])
+ word_list += str(items[word_cursor + paragraphCursor])
word_list += ' '
paragraphCursor += 1
word_cursor += paragraphCursor
@@ -28,8 +50,13 @@
print(word_list)
-def main():
- print_items(fetch_words())
+def main(url):
+ """Print each word from a text document from a URL.
+
+ Args:
+ url: The URL to a UTF-8 text document
+ """
+ print_items(fetch_words(url))
if __name__ == '__main__':
- main()
+ main(sys.argv[1]) |
de6dbe4fa15691cb1a7ec4077e6aaf0eca4e1d48 | PRESUBMIT.py | PRESUBMIT.py | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
return report
def GetPreferredTrySlaves():
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac']
| # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
license = (
r'.*? Copyright \(c\) %(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': input_api.time.strftime('%Y'),
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
return report
def GetPreferredTrySlaves():
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac']
| Fix the license header regex. | Fix the license header regex.
Most of the files are attributed to Google Inc so I used this instead of
Chromium Authors.
R=mark@chromium.org
BUG=
TEST=
Review URL: http://codereview.chromium.org/7108074 | Python | bsd-3-clause | witwall/gyp,witwall/gyp,witwall/gyp,witwall/gyp,witwall/gyp | ---
+++
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -19,8 +19,17 @@
def CheckChangeOnCommit(input_api, output_api):
report = []
+ license = (
+ r'.*? Copyright \(c\) %(year)s Google Inc\. All rights reserved\.\n'
+ r'.*? Use of this source code is governed by a BSD-style license that '
+ r'can be\n'
+ r'.*? found in the LICENSE file\.\n'
+ ) % {
+ 'year': input_api.time.strftime('%Y'),
+ }
+
report.extend(input_api.canned_checks.PanProjectChecks(
- input_api, output_api))
+ input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status', |
52a95cb48d0829d864d25b5e8c380c0e58d51b16 | coaster/docflow.py | coaster/docflow.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import docflow
from werkzeug.exceptions import Forbidden
__all__ = ['WorkflowStateException', 'WorkflowTransitionException',
'WorkflowPermissionException', 'WorkflowState', 'WorkflowStateGroup',
'DocumentWorkflow']
class WorkflowStateException(docflow.WorkflowStateException, Forbidden):
pass
class WorkflowTransitionException(docflow.WorkflowTransitionException, Forbidden):
pass
class WorkflowPermissionException(docflow.WorkflowPermissionException, Forbidden):
pass
class WorkflowState(docflow.WorkflowState):
__doc__ = docflow.WorkflowState.__doc__
exception_state = WorkflowStateException
exception_transition = WorkflowTransitionException
exception_permission = WorkflowPermissionException
class WorkflowStateGroup(docflow.WorkflowStateGroup):
__doc__ = docflow.WorkflowStateGroup.__doc__
exception_state = WorkflowStateException
exception_transition = WorkflowTransitionException
exception_permission = WorkflowPermissionException
class DocumentWorkflow(docflow.DocumentWorkflow):
__doc__ = docflow.DocumentWorkflow.__doc__
exception_state = WorkflowStateException
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask import g
import docflow
from werkzeug.exceptions import Forbidden
__all__ = ['WorkflowStateException', 'WorkflowTransitionException',
'WorkflowPermissionException', 'WorkflowState', 'WorkflowStateGroup',
'InteractiveTransition', 'DocumentWorkflow']
class WorkflowStateException(docflow.WorkflowStateException, Forbidden):
pass
class WorkflowTransitionException(docflow.WorkflowTransitionException, Forbidden):
pass
class WorkflowPermissionException(docflow.WorkflowPermissionException, Forbidden):
pass
class WorkflowState(docflow.WorkflowState):
__doc__ = docflow.WorkflowState.__doc__
exception_state = WorkflowStateException
exception_transition = WorkflowTransitionException
exception_permission = WorkflowPermissionException
class WorkflowStateGroup(docflow.WorkflowStateGroup):
__doc__ = docflow.WorkflowStateGroup.__doc__
exception_state = WorkflowStateException
exception_transition = WorkflowTransitionException
exception_permission = WorkflowPermissionException
class InteractiveTransition(docflow.InteractiveTransition):
__doc__ = docflow.InteractiveTransition.__doc__
def __init__(self, workflow):
super(InteractiveTransition, self).__init__(workflow)
if hasattr(self, 'formclass'):
self.form = formclass(obj=self.document)
def validate(self):
"""Validate self.form, assuming Flask-WTF Form"""
return self.form.validate_on_submit()
class DocumentWorkflow(docflow.DocumentWorkflow):
__doc__ = docflow.DocumentWorkflow.__doc__
exception_state = WorkflowStateException
def permissions(self):
"""
Permissions for this workflow. Plays nice with
:meth:`coaster.views.load_models` and
:class:`coaster.sqlalchemy.PermissionMixin` to determine the available
permissions to the current user.
"""
perms = set(super(DocumentWorkflow, self).permissions())
if hasattr(g, 'permissions'):
perms.update(g.permissions)
if hasattr(self.document, 'permissions') and hasattr(g, 'user'):
perms = self.document.permisssions(g.user, perms)
return perms
| Update for Docflow 0.3.2 and PermissionMixin | Update for Docflow 0.3.2 and PermissionMixin
| Python | bsd-2-clause | hasgeek/coaster | ---
+++
@@ -1,12 +1,13 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
+from flask import g
import docflow
from werkzeug.exceptions import Forbidden
__all__ = ['WorkflowStateException', 'WorkflowTransitionException',
'WorkflowPermissionException', 'WorkflowState', 'WorkflowStateGroup',
- 'DocumentWorkflow']
+ 'InteractiveTransition', 'DocumentWorkflow']
class WorkflowStateException(docflow.WorkflowStateException, Forbidden):
@@ -37,7 +38,34 @@
exception_permission = WorkflowPermissionException
+class InteractiveTransition(docflow.InteractiveTransition):
+ __doc__ = docflow.InteractiveTransition.__doc__
+
+ def __init__(self, workflow):
+ super(InteractiveTransition, self).__init__(workflow)
+ if hasattr(self, 'formclass'):
+ self.form = formclass(obj=self.document)
+
+ def validate(self):
+ """Validate self.form, assuming Flask-WTF Form"""
+ return self.form.validate_on_submit()
+
+
class DocumentWorkflow(docflow.DocumentWorkflow):
__doc__ = docflow.DocumentWorkflow.__doc__
exception_state = WorkflowStateException
+
+ def permissions(self):
+ """
+ Permissions for this workflow. Plays nice with
+ :meth:`coaster.views.load_models` and
+ :class:`coaster.sqlalchemy.PermissionMixin` to determine the available
+ permissions to the current user.
+ """
+ perms = set(super(DocumentWorkflow, self).permissions())
+ if hasattr(g, 'permissions'):
+ perms.update(g.permissions)
+ if hasattr(self.document, 'permissions') and hasattr(g, 'user'):
+ perms = self.document.permisssions(g.user, perms)
+ return perms |
293a1f63b7a3011c36a0cba71874b92be460e1aa | app/views.py | app/views.py | from app import app
from app.models import Post
from flask import render_template
@app.route('/')
@app.route('/page/<int:page>')
def blog(page=1):
"""View the blog."""
posts = Post.query.filter_by(visible=True) \
.order_by(Post.published.desc())
if posts:
pagination = posts.paginate(page=page, per_page=Post.PER_PAGE)
return render_template('blog.html', pagination=pagination)
@app.route('/archive')
def archive():
"""View an overview of all visible posts."""
posts = Post.query.filter_by(visible=True) \
.order_by(Post.published.desc())
return render_template('archive.html', posts=posts)
@app.route('/<path:slug>', methods=['GET', 'POST'])
def detail(slug):
"""View details of post with specified slug."""
post = Post.query.filter_by(visible=True, slug=slug) \
.first_or_404()
return render_template('detail.html', post=post)
| from flask import render_template, send_file
from app import app
from app.models import Post
@app.route('/')
@app.route('/page/<int:page>')
def blog(page=1):
"""View the blog."""
posts = Post.query.filter_by(visible=True) \
.order_by(Post.published.desc())
if posts:
pagination = posts.paginate(page=page, per_page=Post.PER_PAGE)
return render_template('blog.html', pagination=pagination)
@app.route('/archive')
def archive():
"""View an overview of all visible posts."""
posts = Post.query.filter_by(visible=True) \
.order_by(Post.published.desc())
return render_template('archive.html', posts=posts)
@app.route('/<path:slug>', methods=['GET', 'POST'])
def detail(slug):
"""View details of post with specified slug."""
post = Post.query.filter_by(visible=True, slug=slug) \
.first_or_404()
return render_template('detail.html', post=post)
@app.route('/admin')
def admin():
return send_file('static/admin-panel/app/index.html')
| Add route for admin panel | Add route for admin panel
| Python | mit | thebitstick/Flask-Blog,thebitstick/Flask-Blog | ---
+++
@@ -1,6 +1,7 @@
+from flask import render_template, send_file
+
from app import app
from app.models import Post
-from flask import render_template
@app.route('/')
@@ -28,3 +29,8 @@
post = Post.query.filter_by(visible=True, slug=slug) \
.first_or_404()
return render_template('detail.html', post=post)
+
+
+@app.route('/admin')
+def admin():
+ return send_file('static/admin-panel/app/index.html') |
cd1edf946fcf8b22b5f78f4a1db393b777951527 | website/files/utils.py | website/files/utils.py | from osf.models.base import generate_object_id
def copy_files(src, target_node, parent=None, name=None):
"""Copy the files from src to the target node
:param Folder src: The source to copy children from
:param Node target_node: The node to copy files to
:param Folder parent: The parent of to attach the clone of src to, if applicable
"""
assert not parent or not parent.is_file, 'Parent must be a folder'
cloned = src.clone()
cloned.parent = parent
cloned.target = target_node
cloned.name = name or cloned.name
cloned.copied_from = src
cloned.save()
if src.is_file and src.versions.exists():
fileversions = src.versions.select_related('region').order_by('-created')
most_recent_fileversion = fileversions.first()
if most_recent_fileversion.region != target_node.osfstorage_region:
# add all original version except the most recent
cloned.versions.add(*fileversions[1:])
# setting the id to None and calling save generates a new object
most_recent_fileversion.id = None
most_recent_fileversion._id = generate_object_id()
most_recent_fileversion.region = target_node.osfstorage_region
most_recent_fileversion.save()
cloned.versions.add(most_recent_fileversion)
else:
cloned.versions.add(*src.versions.all())
if not src.is_file:
for child in src.children:
copy_files(child, target_node, parent=cloned)
return cloned
|
def copy_files(src, target_node, parent=None, name=None):
"""Copy the files from src to the target node
:param Folder src: The source to copy children from
:param Node target_node: The node to copy files to
:param Folder parent: The parent of to attach the clone of src to, if applicable
"""
assert not parent or not parent.is_file, 'Parent must be a folder'
cloned = src.clone()
cloned.parent = parent
cloned.target = target_node
cloned.name = name or cloned.name
cloned.copied_from = src
cloned.save()
if src.is_file and src.versions.exists():
fileversions = src.versions.select_related('region').order_by('-created')
most_recent_fileversion = fileversions.first()
if most_recent_fileversion.region != target_node.osfstorage_region:
# add all original version except the most recent
cloned.versions.add(*fileversions[1:])
# create a new most recent version and update the region before adding
new_fileversion = most_recent_fileversion.clone()
new_fileversion.region = target_node.osfstorage_region
new_fileversion.save()
cloned.versions.add(new_fileversion)
else:
cloned.versions.add(*src.versions.all())
if not src.is_file:
for child in src.children:
copy_files(child, target_node, parent=cloned)
return cloned
| Use clone() method from BaseModel to copy most recent fileversion | Use clone() method from BaseModel to copy most recent fileversion
| Python | apache-2.0 | adlius/osf.io,felliott/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,mfraezz/osf.io,cslzchen/osf.io,caseyrollins/osf.io,mfraezz/osf.io,saradbowman/osf.io,pattisdr/osf.io,aaxelb/osf.io,aaxelb/osf.io,adlius/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,cslzchen/osf.io,cslzchen/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,adlius/osf.io,pattisdr/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,felliott/osf.io,felliott/osf.io,mattclark/osf.io,saradbowman/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,mfraezz/osf.io,adlius/osf.io,mattclark/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,felliott/osf.io,CenterForOpenScience/osf.io | ---
+++
@@ -1,5 +1,3 @@
-from osf.models.base import generate_object_id
-
def copy_files(src, target_node, parent=None, name=None):
"""Copy the files from src to the target node
@@ -23,12 +21,11 @@
if most_recent_fileversion.region != target_node.osfstorage_region:
# add all original version except the most recent
cloned.versions.add(*fileversions[1:])
- # setting the id to None and calling save generates a new object
- most_recent_fileversion.id = None
- most_recent_fileversion._id = generate_object_id()
- most_recent_fileversion.region = target_node.osfstorage_region
- most_recent_fileversion.save()
- cloned.versions.add(most_recent_fileversion)
+ # create a new most recent version and update the region before adding
+ new_fileversion = most_recent_fileversion.clone()
+ new_fileversion.region = target_node.osfstorage_region
+ new_fileversion.save()
+ cloned.versions.add(new_fileversion)
else:
cloned.versions.add(*src.versions.all())
if not src.is_file: |
659036918e2c90b47c83c640eef62eaeec42b35a | opps/contrib/notifications/models.py | opps/contrib/notifications/models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from django.db import models
from django.utils.translation import ugettext_lazy as _
from opps.core.models import Publishable
from opps.db import Db
NOTIFICATION_TYPE = (
(u'json', _(u'JSON')),
(u'text', _(u'Text')),
(u'html', _(u'HTML')),
)
class Notification(Publishable):
container = models.ForeignKey('containers.Container')
action = models.CharField(_('Action'), max_length=75,
default="message")
type = models.CharField(_('Type'), max_length=10,
choices=NOTIFICATION_TYPE,
type='json')
message = models.TextField(_('Message'))
def add(self, container, message, action='message', _type='json',
**attrs):
notification = Notification.objects.create(
container=container,
action=action,
type=_type,
message=message,
**attrs
)
_db = Db(notification.container.get_absolute_url(),
notification.container.id)
_db.publish(json.dumps({
"action": notification.action,
"id": notification.id,
"published": notification.published,
"date": notification.date_available,
"message": notification.message}))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from django.db import models
from django.utils.translation import ugettext_lazy as _
from opps.core.models import Publishable
from opps.db import Db
NOTIFICATION_TYPE = (
(u'json', _(u'JSON')),
(u'text', _(u'Text')),
(u'html', _(u'HTML')),
)
class Notification(Publishable):
container = models.ForeignKey('containers.Container')
action = models.CharField(_('Action'), max_length=75,
default="message")
type = models.CharField(_('Type'), max_length=10,
choices=NOTIFICATION_TYPE,
type='json')
message = models.TextField(_('Message'))
def save(self, *args, **kwargs):
_db = Db(self.container.get_absolute_url(),
self.container.id)
_db.publish(json.dumps({
"action": self.action,
"id": self.id,
"published": self.published,
"date": self.date_available,
"message": self.message}))
| Update save Notification, save in nosql database | Update save Notification, save in nosql database
| Python | mit | williamroot/opps,opps/opps,jeanmask/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,YACOWS/opps,opps/opps,jeanmask/opps,williamroot/opps,williamroot/opps,opps/opps,jeanmask/opps,opps/opps,YACOWS/opps | ---
+++
@@ -24,21 +24,12 @@
type='json')
message = models.TextField(_('Message'))
- def add(self, container, message, action='message', _type='json',
- **attrs):
- notification = Notification.objects.create(
- container=container,
- action=action,
- type=_type,
- message=message,
- **attrs
- )
-
- _db = Db(notification.container.get_absolute_url(),
- notification.container.id)
+ def save(self, *args, **kwargs):
+ _db = Db(self.container.get_absolute_url(),
+ self.container.id)
_db.publish(json.dumps({
- "action": notification.action,
- "id": notification.id,
- "published": notification.published,
- "date": notification.date_available,
- "message": notification.message}))
+ "action": self.action,
+ "id": self.id,
+ "published": self.published,
+ "date": self.date_available,
+ "message": self.message})) |
632470cad13b7bfe88c52d4aafd9cbf9fff37b07 | rasterio/tool.py | rasterio/tool.py |
import code
import collections
import logging
import sys
import numpy
import rasterio
logger = logging.getLogger('rasterio')
Stats = collections.namedtuple('Stats', ['min', 'max', 'mean'])
def main(banner, dataset):
def show(source):
"""Show a raster using matplotlib.
The raster may be either an ndarray or a (dataset, bidx)
tuple.
"""
import matplotlib.pyplot as plt
if isinstance(source, tuple):
arr = source[0].read_band(source[1])
else:
arr = source
plt.imshow(arr)
plt.gray()
plt.show()
def stats(source):
"""Return a tuple with raster min, max, and mean.
"""
if isinstance(source, tuple):
arr = source[0].read_band(source[1])
else:
arr = source
return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))
code.interact(
banner, local=dict(locals(), src=dataset, np=numpy, rio=rasterio))
return 0
|
import code
import collections
import logging
import sys
import numpy
import rasterio
logger = logging.getLogger('rasterio')
Stats = collections.namedtuple('Stats', ['min', 'max', 'mean'])
def main(banner, dataset):
def show(source, cmap='gray'):
"""Show a raster using matplotlib.
The raster may be either an ndarray or a (dataset, bidx)
tuple.
"""
import matplotlib.pyplot as plt
if isinstance(source, tuple):
arr = source[0].read_band(source[1])
else:
arr = source
plt.imshow(arr, cmap=cmap)
plt.show()
def stats(source):
"""Return a tuple with raster min, max, and mean.
"""
if isinstance(source, tuple):
arr = source[0].read_band(source[1])
else:
arr = source
return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))
code.interact(
banner, local=dict(locals(), src=dataset, np=numpy, rio=rasterio))
return 0
| Add colormap option to show(). | Add colormap option to show().
| Python | bsd-3-clause | perrygeo/rasterio,perrygeo/rasterio,clembou/rasterio,johanvdw/rasterio,njwilson23/rasterio,clembou/rasterio,clembou/rasterio,johanvdw/rasterio,njwilson23/rasterio,snorfalorpagus/rasterio,brendan-ward/rasterio,kapadia/rasterio,youngpm/rasterio,kapadia/rasterio,sgillies/rasterio,kapadia/rasterio,brendan-ward/rasterio,johanvdw/rasterio,njwilson23/rasterio,perrygeo/rasterio,youngpm/rasterio,youngpm/rasterio,brendan-ward/rasterio | ---
+++
@@ -15,7 +15,7 @@
def main(banner, dataset):
- def show(source):
+ def show(source, cmap='gray'):
"""Show a raster using matplotlib.
The raster may be either an ndarray or a (dataset, bidx)
@@ -26,8 +26,7 @@
arr = source[0].read_band(source[1])
else:
arr = source
- plt.imshow(arr)
- plt.gray()
+ plt.imshow(arr, cmap=cmap)
plt.show()
def stats(source): |
841d89ea9199bfad94c74b75f2bab5ae2690ede4 | numba/tests/broken_issues/test_closure_modulo.py | numba/tests/broken_issues/test_closure_modulo.py | from numba import autojit
import numpy as np
print np.zeros(10).dtype
@autojit
def closure_modulo(a, b):
c = np.zeros(10)
@jit('f8[:]()')
def foo():
c[0] = a % b
return c
return foo()
print closure_modulo(100, 48)
| from numba import autojit
@autojit
def closure_modulo(a, b):
@jit('int32()')
def foo():
return a % b
return foo()
print closure_modulo(100, 48)
| Simplify and cleanup previous commit. | Simplify and cleanup previous commit.
| Python | bsd-2-clause | ssarangi/numba,stefanseefeld/numba,pitrou/numba,pombredanne/numba,cpcloud/numba,stefanseefeld/numba,gdementen/numba,seibert/numba,sklam/numba,pombredanne/numba,IntelLabs/numba,IntelLabs/numba,numba/numba,gdementen/numba,ssarangi/numba,pombredanne/numba,shiquanwang/numba,stuartarchibald/numba,stonebig/numba,IntelLabs/numba,numba/numba,GaZ3ll3/numba,sklam/numba,sklam/numba,stefanseefeld/numba,pitrou/numba,stuartarchibald/numba,gmarkall/numba,stefanseefeld/numba,jriehl/numba,stuartarchibald/numba,sklam/numba,gdementen/numba,pombredanne/numba,stonebig/numba,shiquanwang/numba,seibert/numba,gmarkall/numba,ssarangi/numba,pombredanne/numba,GaZ3ll3/numba,numba/numba,seibert/numba,gdementen/numba,cpcloud/numba,pitrou/numba,stonebig/numba,IntelLabs/numba,cpcloud/numba,ssarangi/numba,pitrou/numba,jriehl/numba,stuartarchibald/numba,gmarkall/numba,shiquanwang/numba,gmarkall/numba,cpcloud/numba,GaZ3ll3/numba,cpcloud/numba,numba/numba,jriehl/numba,pitrou/numba,numba/numba,stefanseefeld/numba,jriehl/numba,ssarangi/numba,stonebig/numba,seibert/numba,GaZ3ll3/numba,jriehl/numba,seibert/numba,IntelLabs/numba,stuartarchibald/numba,sklam/numba,GaZ3ll3/numba,gmarkall/numba,gdementen/numba,stonebig/numba | ---
+++
@@ -1,15 +1,10 @@
from numba import autojit
-import numpy as np
-print np.zeros(10).dtype
-
@autojit
def closure_modulo(a, b):
- c = np.zeros(10)
- @jit('f8[:]()')
+ @jit('int32()')
def foo():
- c[0] = a % b
- return c
+ return a % b
return foo()
print closure_modulo(100, 48) |
845fe88451dfc8e3505d4481e8160344942d3fdf | telethon/tl/patched/__init__.py | telethon/tl/patched/__init__.py | from .. import types, alltlobjects
from ..custom.message import Message as _Message
class MessageEmpty(_Message, types.MessageEmpty):
pass
types.MessageEmpty = MessageEmpty
alltlobjects.tlobjects[MessageEmpty.CONSTRUCTOR_ID] = MessageEmpty
class MessageService(_Message, types.MessageService):
pass
types.MessageService = MessageService
alltlobjects.tlobjects[MessageService.CONSTRUCTOR_ID] = MessageService
class _Message(_Message, types.Message):
pass
Message = _Message
types.Message = Message
| from .. import types, alltlobjects
from ..custom.message import Message as _Message
class MessageEmpty(_Message, types.MessageEmpty):
pass
types.MessageEmpty = MessageEmpty
alltlobjects.tlobjects[MessageEmpty.CONSTRUCTOR_ID] = MessageEmpty
class MessageService(_Message, types.MessageService):
pass
types.MessageService = MessageService
alltlobjects.tlobjects[MessageService.CONSTRUCTOR_ID] = MessageService
class Message(_Message, types.Message):
pass
types.Message = Message
alltlobjects.tlobjects[Message.CONSTRUCTOR_ID] = Message
| Fix definition typo in patched module | Fix definition typo in patched module
| Python | mit | LonamiWebs/Telethon,LonamiWebs/Telethon,LonamiWebs/Telethon,LonamiWebs/Telethon | ---
+++
@@ -13,8 +13,8 @@
types.MessageService = MessageService
alltlobjects.tlobjects[MessageService.CONSTRUCTOR_ID] = MessageService
-class _Message(_Message, types.Message):
+class Message(_Message, types.Message):
pass
-Message = _Message
types.Message = Message
+alltlobjects.tlobjects[Message.CONSTRUCTOR_ID] = Message |
f2d972831733fc77fef93d7d665750a55bbd4c33 | cli_tcdel.py | cli_tcdel.py | #!/usr/bin/env python
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import sys
from tcconfig.tcshow import main
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import sys
from tcconfig.tcdel import main
if __name__ == '__main__':
sys.exit(main())
| Fix tcdel for the deb package | Fix tcdel for the deb package
| Python | mit | thombashi/tcconfig,thombashi/tcconfig | ---
+++
@@ -7,7 +7,7 @@
import sys
-from tcconfig.tcshow import main
+from tcconfig.tcdel import main
if __name__ == '__main__': |
7906153b4718f34ed31c193a8e80b171e567209c | go/routers/keyword/view_definition.py | go/routers/keyword/view_definition.py | from django import forms
from go.router.view_definition import RouterViewDefinitionBase, EditRouterView
class KeywordForm(forms.Form):
keyword = forms.CharField()
target_endpoint = forms.CharField()
class BaseKeywordFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
return [{'keyword': k, 'target_endpoint': v}
for k, v in sorted(data.items())]
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
if (not form.is_valid()) or form.cleaned_data['DELETE']:
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint']
keyword_endpoint_mapping[keyword] = target_endpoint
return keyword_endpoint_mapping
KeywordFormSet = forms.formsets.formset_factory(
KeywordForm, can_delete=True, extra=1, formset=BaseKeywordFormSet)
class EditKeywordView(EditRouterView):
edit_forms = (
('keyword_endpoint_mapping', KeywordFormSet),
)
class RouterViewDefinition(RouterViewDefinitionBase):
edit_view = EditKeywordView
| from django import forms
from go.router.view_definition import RouterViewDefinitionBase, EditRouterView
class KeywordForm(forms.Form):
keyword = forms.CharField()
target_endpoint = forms.CharField()
class BaseKeywordFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
return [{'keyword': k, 'target_endpoint': v}
for k, v in sorted(data.items())]
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
if not form.is_valid():
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint']
keyword_endpoint_mapping[keyword] = target_endpoint
return keyword_endpoint_mapping
KeywordFormSet = forms.formsets.formset_factory(
KeywordForm, can_delete=True, extra=1, formset=BaseKeywordFormSet)
class EditKeywordView(EditRouterView):
edit_forms = (
('keyword_endpoint_mapping', KeywordFormSet),
)
class RouterViewDefinition(RouterViewDefinitionBase):
edit_view = EditKeywordView
| Remove unnecessary and broken DELETE check. | Remove unnecessary and broken DELETE check.
| Python | bsd-3-clause | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | ---
+++
@@ -17,7 +17,7 @@
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
- if (not form.is_valid()) or form.cleaned_data['DELETE']:
+ if not form.is_valid():
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint'] |
25ec9d2ee99437d0b68f53c9a4f1f5bc72ecf71f | app/utils.py | app/utils.py | def get_or_create(model, **kwargs):
""" Returns an instance of model and whether or not it already existed in a tuple. """
instance = model.query.filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
return instance, True | from urllib.parse import urlparse, urljoin
from flask import request
def get_or_create(model, **kwargs):
""" Returns an instance of model and whether or not it already existed in a tuple. """
instance = model.query.filter_by(**kwargs).first()
if instance:
return instance, False
else:
instance = model(**kwargs)
return instance, True
def is_safe_url(target):
""" Checks if an URL is safe. """
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc | Add utility function for checking URL safety | Add utility function for checking URL safety
| Python | mit | Encrylize/MyDictionary,Encrylize/MyDictionary,Encrylize/MyDictionary | ---
+++
@@ -1,3 +1,6 @@
+from urllib.parse import urlparse, urljoin
+from flask import request
+
def get_or_create(model, **kwargs):
""" Returns an instance of model and whether or not it already existed in a tuple. """
instance = model.query.filter_by(**kwargs).first()
@@ -6,3 +9,9 @@
else:
instance = model(**kwargs)
return instance, True
+
+def is_safe_url(target):
+ """ Checks if an URL is safe. """
+ ref_url = urlparse(request.host_url)
+ test_url = urlparse(urljoin(request.host_url, target))
+ return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc |
7d761d538c501cefb5e6acf62d3f0e945584b5ec | src/django_integration.py | src/django_integration.py | # -*- coding: utf-8 -*-
# Libraries
from datetime import datetime
# Django stuff
from Instanssi.kompomaatti.misc.events import get_upcoming
from Instanssi.screenshow.models import IRCMessage
from Instanssi.kompomaatti.models import Event
def django_get_event(event_id):
try:
return Event.objects.get(pk=event_id)
except Event.DoesNotExist:
return None
def django_get_upcoming(event):
return get_upcoming(event)[:5]
def django_log_cleanup():
limit = 50
n = 0
last_id = 0
for msg in IRCMessage.objects.all().order_by('-id'):
last_id = msg.id
if n >= limit:
break
n += 1
IRCMessage.objects.filter(id__lt=last_id).delete()
def django_log_add(user, msg, event_id):
try:
message = IRCMessage()
message.event_id = event_id
message.date = datetime.now()
message.message = msg
message.nick = user
message.save()
except UnicodeDecodeError:
return False
return True
| # -*- coding: utf-8 -*-
# Libraries
from django.utils import timezone
# Django stuff
from Instanssi.kompomaatti.misc.events import get_upcoming
from Instanssi.screenshow.models import IRCMessage
from Instanssi.kompomaatti.models import Event
def django_get_event(event_id):
try:
return Event.objects.get(pk=event_id)
except Event.DoesNotExist:
return None
def django_get_upcoming(event):
return get_upcoming(event)[:5]
def django_log_cleanup():
limit = 50
n = 0
last_id = 0
for msg in IRCMessage.objects.all().order_by('-id'):
last_id = msg.id
if n >= limit:
break
n += 1
IRCMessage.objects.filter(id__lt=last_id).delete()
def django_log_add(user, msg, event_id):
try:
message = IRCMessage()
message.event_id = event_id
message.date = timezone.now()
message.message = msg
message.nick = user
message.save()
except UnicodeDecodeError:
return False
return True
| Use timezone library for dates | Use timezone library for dates
| Python | mit | Instanssi/KompomaattiBot | ---
+++
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Libraries
-from datetime import datetime
+from django.utils import timezone
# Django stuff
from Instanssi.kompomaatti.misc.events import get_upcoming
@@ -37,7 +37,7 @@
try:
message = IRCMessage()
message.event_id = event_id
- message.date = datetime.now()
+ message.date = timezone.now()
message.message = msg
message.nick = user
message.save() |
91d6021fb0db6052570f1a0305a141e1af13b6e3 | localeurl/models.py | localeurl/models.py | from django.conf import settings
from django.core import urlresolvers
from django.utils import translation
from localeurl import utils
def reverse(*args, **kwargs):
reverse_kwargs = kwargs.get('kwargs', {})
locale = utils.supported_language(reverse_kwargs.pop('locale',
translation.get_language()))
url = django_reverse(*args, **kwargs)
_, path = utils.strip_script_prefix(url)
return utils.locale_url(path, locale)
django_reverse = None
def patch_reverse():
"""
Monkey-patches the urlresolvers.reverse function. Will not patch twice.
"""
global django_reverse
if urlresolvers.reverse is not reverse:
django_reverse = urlresolvers.reverse
urlresolvers.reverse = reverse
if settings.USE_I18N:
patch_reverse()
| from django.conf import settings
from django.core import urlresolvers
from django.utils import translation
from localeurl import utils
def reverse(*args, **kwargs):
reverse_kwargs = kwargs.get('kwargs', {})
if reverse_kwargs!=None:
locale = utils.supported_language(reverse_kwargs.pop('locale',
translation.get_language()))
else:
locale = translation.get_language()
url = django_reverse(*args, **kwargs)
_, path = utils.strip_script_prefix(url)
return utils.locale_url(path, locale)
django_reverse = None
def patch_reverse():
"""
Monkey-patches the urlresolvers.reverse function. Will not patch twice.
"""
global django_reverse
if urlresolvers.reverse is not reverse:
django_reverse = urlresolvers.reverse
urlresolvers.reverse = reverse
if settings.USE_I18N:
patch_reverse()
| Handle situation when kwargs is None | Handle situation when kwargs is None
| Python | mit | carljm/django-localeurl,gonnado/django-localeurl,extertioner/django-localeurl | ---
+++
@@ -5,8 +5,11 @@
def reverse(*args, **kwargs):
reverse_kwargs = kwargs.get('kwargs', {})
- locale = utils.supported_language(reverse_kwargs.pop('locale',
- translation.get_language()))
+ if reverse_kwargs!=None:
+ locale = utils.supported_language(reverse_kwargs.pop('locale',
+ translation.get_language()))
+ else:
+ locale = translation.get_language()
url = django_reverse(*args, **kwargs)
_, path = utils.strip_script_prefix(url)
return utils.locale_url(path, locale) |
d367f32d6b8269d402305714eb95fe708f8e2b0d | junction/conferences/serializers.py | junction/conferences/serializers.py | from rest_framework import serializers
from .models import Conference, ConferenceVenue, Room
class ConferenceSerializer(serializers.HyperlinkedModelSerializer):
status = serializers.CharField(source='get_status_display')
class Meta:
model = Conference
fields = ('id', 'name', 'slug', 'description',
'start_date', 'end_date', 'status', 'venue')
class VenueSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ConferenceVenue
fields = ('name', 'address')
class RoomSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Room
fields = ('name', 'venue', 'note')
| from rest_framework import serializers
from .models import Conference, ConferenceVenue, Room
class ConferenceSerializer(serializers.HyperlinkedModelSerializer):
status = serializers.CharField(source='get_status_display')
class Meta:
model = Conference
fields = ('id', 'name', 'slug', 'description',
'start_date', 'end_date', 'status', 'venue')
class VenueSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ConferenceVenue
fields = ('name', 'address', 'latitude', 'longitudes')
class RoomSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Room
fields = ('name', 'venue', 'note')
| Add latitude and longitude in api | Add latitude and longitude in api
| Python | mit | ChillarAnand/junction,praba230890/junction,nava45/junction,pythonindia/junction,ChillarAnand/junction,ChillarAnand/junction,pythonindia/junction,nava45/junction,nava45/junction,farhaanbukhsh/junction,nava45/junction,praba230890/junction,farhaanbukhsh/junction,pythonindia/junction,ChillarAnand/junction,pythonindia/junction,farhaanbukhsh/junction,farhaanbukhsh/junction,praba230890/junction,praba230890/junction | ---
+++
@@ -15,7 +15,7 @@
class VenueSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ConferenceVenue
- fields = ('name', 'address')
+ fields = ('name', 'address', 'latitude', 'longitudes')
class RoomSerializer(serializers.HyperlinkedModelSerializer): |
101784851bf8c7add95bba7dbb4277114b7a3098 | refugeedata/mailings/tasks.py | refugeedata/mailings/tasks.py | from django_rq import job
from .. import utils
@job
def send_sms(to, body):
utils.send_sms(to=to, body=body)
| from django_rq import job
from .. import utils
@job
def send_sms(to, body):
for number in to:
send_single_sms.delay([to], body)
@job
def send_single_sms(to, body):
utils.send_sms(to=to, body=body)
| Split SMS sending into multiple jobs (to get around Heroku timeout) | Split SMS sending into multiple jobs (to get around Heroku timeout)
| Python | mit | ukch/refugeedata,ukch/refugeedata,ukch/refugeedata,ukch/refugeedata | ---
+++
@@ -5,4 +5,10 @@
@job
def send_sms(to, body):
+ for number in to:
+ send_single_sms.delay([to], body)
+
+
+@job
+def send_single_sms(to, body):
utils.send_sms(to=to, body=body) |
03c62fd2676918524a8cef469028d41f1b74d2f4 | python/ql/test/experimental/library-tests/frameworks/django/SqlExecution.py | python/ql/test/experimental/library-tests/frameworks/django/SqlExecution.py | from django.db import connection, models
from django.db.models.expressions import RawSQL
def test_plain():
cursor = connection.cursor()
cursor.execute("some sql") # $getSql="some sql"
def test_context():
with connection.cursor() as cursor:
cursor.execute("some sql") # $getSql="some sql"
cursor.execute(sql="some sql") # $getSql="some sql"
class User(models.Model):
pass
def test_model():
User.objects.raw("some sql") # $getSql="some sql"
User.objects.annotate(RawSQL("some sql")) # $getSql="some sql"
User.objects.annotate(val=RawSQL("some sql")) # $getSql="some sql"
User.objects.extra("some sql") # $getSql="some sql"
User.objects.extra(select="select", where="where", tables="tables", order_by="order_by") # $getSql="select" $getSql="where" $getSql="tables" $getSql="order_by"
raw = RawSQL("so raw")
Users.objects.annotate(val=raw) # $f-:getSql="so raw"
| from django.db import connection, models
from django.db.models.expressions import RawSQL
def test_plain():
cursor = connection.cursor()
cursor.execute("some sql") # $getSql="some sql"
def test_context():
with connection.cursor() as cursor:
cursor.execute("some sql") # $getSql="some sql"
cursor.execute(sql="some sql") # $getSql="some sql"
class User(models.Model):
pass
def test_model():
User.objects.raw("some sql") # $getSql="some sql"
User.objects.annotate(RawSQL("some sql")) # $getSql="some sql"
User.objects.annotate(val=RawSQL("some sql")) # $getSql="some sql"
User.objects.extra("some sql") # $getSql="some sql"
User.objects.extra(select="select", where="where", tables="tables", order_by="order_by") # $getSql="select" $getSql="where" $getSql="tables" $getSql="order_by"
raw = RawSQL("so raw")
User.objects.annotate(val=raw) # $f-:getSql="so raw"
| Fix typo in test case | Python: Fix typo in test case
| Python | mit | github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql | ---
+++
@@ -25,4 +25,4 @@
User.objects.extra(select="select", where="where", tables="tables", order_by="order_by") # $getSql="select" $getSql="where" $getSql="tables" $getSql="order_by"
raw = RawSQL("so raw")
- Users.objects.annotate(val=raw) # $f-:getSql="so raw"
+ User.objects.annotate(val=raw) # $f-:getSql="so raw" |
7ffe4749d1c8f5315775a50a9f7b6eb632c6a258 | scorecard/tests/test_views.py | scorecard/tests/test_views.py | import json
from django.test import (
TransactionTestCase,
Client,
override_settings,
)
from . import (
import_data,
)
from .resources import (
GeographyResource,
MunicipalityProfileResource,
MedianGroupResource,
RatingCountGroupResource,
)
@override_settings(
SITE_ID=2,
STATICFILES_STORAGE="django.contrib.staticfiles.storage.StaticFilesStorage",
)
class GeographyDetailViewTestCase(TransactionTestCase):
serialized_rollback = True
def test_context(self):
# Import sample data
import_data(
GeographyResource,
"views/scorecard_geography.csv",
)
import_data(
MunicipalityProfileResource,
"views/municipality_profile.csv",
)
import_data(
MedianGroupResource,
"views/median_group.csv",
)
import_data(
RatingCountGroupResource,
"views/rating_count_group.csv",
)
# Make request
client = Client()
response = client.get("/profiles/municipality-CPT-city-of-cape-town/")
context = response.context
page_data = json.loads(context["page_data_json"])
# Test for amount types
self.assertIsInstance(page_data["amount_types_v1"], dict)
# Test for cube names
self.assertIsInstance(page_data["cube_names"], dict)
# Test for municipality category descriptions
self.assertIsInstance(page_data["municipal_category_descriptions"], dict)
| import json
from infrastructure.models import FinancialYear
from django.test import (
TransactionTestCase,
Client,
override_settings,
)
from . import (
import_data,
)
from .resources import (
GeographyResource,
MunicipalityProfileResource,
MedianGroupResource,
RatingCountGroupResource,
)
@override_settings(
SITE_ID=2,
STATICFILES_STORAGE="django.contrib.staticfiles.storage.StaticFilesStorage",
)
class GeographyDetailViewTestCase(TransactionTestCase):
serialized_rollback = True
def test_context(self):
# Import sample data
import_data(
GeographyResource,
"views/scorecard_geography.csv",
)
import_data(
MunicipalityProfileResource,
"views/municipality_profile.csv",
)
import_data(
MedianGroupResource,
"views/median_group.csv",
)
import_data(
RatingCountGroupResource,
"views/rating_count_group.csv",
)
fy = FinancialYear.objects.create(budget_year="2019/2020")
# Make request
client = Client()
response = client.get("/profiles/municipality-CPT-city-of-cape-town/")
context = response.context
page_data = json.loads(context["page_data_json"])
# Test for amount types
self.assertIsInstance(page_data["amount_types_v1"], dict)
# Test for cube names
self.assertIsInstance(page_data["cube_names"], dict)
# Test for municipality category descriptions
self.assertIsInstance(page_data["municipal_category_descriptions"], dict)
| Create a financial year to test with | Create a financial year to test with
| Python | mit | Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data | ---
+++
@@ -1,4 +1,5 @@
import json
+from infrastructure.models import FinancialYear
from django.test import (
TransactionTestCase,
@@ -42,6 +43,9 @@
RatingCountGroupResource,
"views/rating_count_group.csv",
)
+
+ fy = FinancialYear.objects.create(budget_year="2019/2020")
+
# Make request
client = Client()
response = client.get("/profiles/municipality-CPT-city-of-cape-town/") |
54d88ef6ebfacbb2924ca9bc94935ed81fc90244 | migrations/versions/1815829d365_.py | migrations/versions/1815829d365_.py | """empty message
Revision ID: 1815829d365
Revises: 3fcddd64a72
Create Date: 2016-02-09 17:58:47.362133
"""
# revision identifiers, used by Alembic.
revision = '1815829d365'
down_revision = '3fcddd64a72'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# create new unique index to include geometry_application_ref
op.execute("DROP INDEX title_abr_idx")
op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("DROP INDEX title_abr_geo_idx")
op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))")
### end Alembic commands ###
| """empty message
Revision ID: 1815829d365
Revises: 3fcddd64a72
Create Date: 2016-02-09 17:58:47.362133
"""
# revision identifiers, used by Alembic.
revision = '1815829d365'
down_revision = '3fcddd64a72'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# create new unique index to include geometry_application_ref
op.execute("DROP INDEX title_abr_idx")
op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (case when (record->'data'->>'geometry_application_reference') IS NULL then 'NULL' else (record->'data'->>'geometry_application_reference') end))")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("DROP INDEX title_abr_geo_idx")
op.execute("CREATE UNIQUE INDEX title_abr_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'))")
### end Alembic commands ###
| Fix index for title without geo abr | Fix index for title without geo abr | Python | mit | LandRegistry/system-of-record,LandRegistry/system-of-record | ---
+++
@@ -18,7 +18,7 @@
### commands auto generated by Alembic - please adjust! ###
# create new unique index to include geometry_application_ref
op.execute("DROP INDEX title_abr_idx")
- op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (record->'data'->>'geometry_application_reference'))")
+ op.execute("CREATE UNIQUE INDEX title_abr_geo_idx ON records((record->'data'->>'title_number'),(record->'data'->>'application_reference'), (case when (record->'data'->>'geometry_application_reference') IS NULL then 'NULL' else (record->'data'->>'geometry_application_reference') end))")
### end Alembic commands ###
|
acb9cd566f259c37c0c943465c324fedb252df9a | tools/grit/grit/extern/FP.py | tools/grit/grit/extern/FP.py | #!/usr/bin/python2.2
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import md5
"""64-bit fingerprint support for strings.
Usage:
from extern import FP
print 'Fingerprint is %ld' % FP.FingerPrint('Hello world!')
"""
def UnsignedFingerPrint(str, encoding='utf-8'):
"""Generate a 64-bit fingerprint by taking the first half of the md5
of the string."""
hex128 = md5.new(str).hexdigest()
int64 = long(hex128[:16], 16)
return int64
def FingerPrint(str, encoding='utf-8'):
fp = UnsignedFingerPrint(str, encoding=encoding)
# interpret fingerprint as signed longs
if fp & 0x8000000000000000L:
fp = - ((~fp & 0xFFFFFFFFFFFFFFFFL) + 1)
return fp
| #!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import md5
"""64-bit fingerprint support for strings.
Usage:
from extern import FP
print 'Fingerprint is %ld' % FP.FingerPrint('Hello world!')
"""
def UnsignedFingerPrint(str, encoding='utf-8'):
"""Generate a 64-bit fingerprint by taking the first half of the md5
of the string."""
hex128 = md5.new(str).hexdigest()
int64 = long(hex128[:16], 16)
return int64
def FingerPrint(str, encoding='utf-8'):
fp = UnsignedFingerPrint(str, encoding=encoding)
# interpret fingerprint as signed longs
if fp & 0x8000000000000000L:
fp = - ((~fp & 0xFFFFFFFFFFFFFFFFL) + 1)
return fp
| Remove version number from Python shebang. | Remove version number from Python shebang.
On special request from someone trying to purge python2.2 from code indexed
internally at Google.
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@7071 0039d316-1c4b-4281-b951-d872f2087c98
| Python | bsd-3-clause | yitian134/chromium,gavinp/chromium,adobe/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,adobe/chromium,gavinp/chromium,adobe/chromium,yitian134/chromium,ropik/chromium,ropik/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,ropik/chromium,yitian134/chromium,ropik/chromium,Crystalnix/house-of-life-chromium,adobe/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,ropik/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,ropik/chromium,adobe/chromium,gavinp/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,ropik/chromium,yitian134/chromium,adobe/chromium,ropik/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,yitian134/chromium,yitian134/chromium,gavinp/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium | ---
+++
@@ -1,4 +1,4 @@
-#!/usr/bin/python2.2
+#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. |
c87f75334aa7253ad209bdd4d88c2429723e51d1 | admin/desk/urls.py | admin/desk/urls.py | from django.conf.urls import url
from admin.desk import views
urlpatterns = [
url(r'^$', views.DeskCaseList.as_view(), name='cases'),
url(r'^customer/(?P<user_id>[a-z0-9]+)/$', views.DeskCustomer.as_view(),
name='customer'),
url(r'^cases/(?P<user_id>[a-z0-9]+)/$', views.DeskCaseList.as_view(),
name='user_cases'),
]
| from django.conf.urls import url
from admin.desk import views
urlpatterns = [
url(r'^customer/(?P<user_id>[a-z0-9]+)/$', views.DeskCustomer.as_view(),
name='customer'),
url(r'^cases/(?P<user_id>[a-z0-9]+)/$', views.DeskCaseList.as_view(),
name='user_cases'),
]
| Remove unused bare desk URL | Remove unused bare desk URL
| Python | apache-2.0 | hmoco/osf.io,aaxelb/osf.io,laurenrevere/osf.io,pattisdr/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,saradbowman/osf.io,chrisseto/osf.io,cwisecarver/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,acshi/osf.io,crcresearch/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,Nesiehr/osf.io,laurenrevere/osf.io,felliott/osf.io,monikagrabowska/osf.io,chennan47/osf.io,pattisdr/osf.io,acshi/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,acshi/osf.io,cwisecarver/osf.io,saradbowman/osf.io,mluo613/osf.io,mfraezz/osf.io,mattclark/osf.io,brianjgeiger/osf.io,adlius/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,icereval/osf.io,chrisseto/osf.io,caneruguz/osf.io,mluo613/osf.io,erinspace/osf.io,aaxelb/osf.io,mattclark/osf.io,cslzchen/osf.io,acshi/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,caseyrollins/osf.io,leb2dg/osf.io,alexschiller/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,alexschiller/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,felliott/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,erinspace/osf.io,mluo613/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,chennan47/osf.io,crcresearch/osf.io,icereval/osf.io,acshi/osf.io,caneruguz/osf.io,Nesiehr/osf.io,laurenrevere/osf.io,adlius/osf.io,sloria/osf.io,leb2dg/osf.io,mattclark/osf.io,hmoco/osf.io,chrisseto/osf.io,mfraezz/osf.io,chrisseto/osf.io,TomBaxter/osf.io,alexschiller/osf.io,rdhyee/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,sloria/osf.io,aaxelb/osf.io,Nesiehr/osf.io,felliott/osf.io,pattisdr/osf.io,chennan47/osf.io,mluo613/osf.io,caneruguz/osf.io,binoculars/osf.io,adlius/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,mluo613/osf.io,cslzchen/osf.io,mfraezz/osf.io,hmoco/osf.io,crcresearch/osf.io,hmoco/osf.io,Nesiehr/osf.io,monikagrabowska/osf.io,erinspace/osf.io,felliott/osf.io,rdhyee/osf.io,binoculars/osf.io,
sloria/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,rdhyee/osf.io,leb2dg/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,adlius/osf.io | ---
+++
@@ -3,7 +3,6 @@
from admin.desk import views
urlpatterns = [
- url(r'^$', views.DeskCaseList.as_view(), name='cases'),
url(r'^customer/(?P<user_id>[a-z0-9]+)/$', views.DeskCustomer.as_view(),
name='customer'),
url(r'^cases/(?P<user_id>[a-z0-9]+)/$', views.DeskCaseList.as_view(), |
ad1c654422413b90c78614f66af61c1410cc1a77 | dsub/_dsub_version.py | dsub/_dsub_version.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single source of truth for dsub's version.
This must remain small and dependency-free so that any dsub module may
import it without creating circular dependencies. Note that this module
is parsed as a text file by setup.py and changes to the format of this
file could break setup.py.
The version should follow formatting requirements specified in PEP-440.
- https://www.python.org/dev/peps/pep-0440
A typical release sequence will be versioned as:
0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ...
"""
DSUB_VERSION = '0.4.5.dev0'
| # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single source of truth for dsub's version.
This must remain small and dependency-free so that any dsub module may
import it without creating circular dependencies. Note that this module
is parsed as a text file by setup.py and changes to the format of this
file could break setup.py.
The version should follow formatting requirements specified in PEP-440.
- https://www.python.org/dev/peps/pep-0440
A typical release sequence will be versioned as:
0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ...
"""
DSUB_VERSION = '0.4.5'
| Update dsub version to 0.4.5 | Update dsub version to 0.4.5
PiperOrigin-RevId: 393155372
| Python | apache-2.0 | DataBiosphere/dsub,DataBiosphere/dsub | ---
+++
@@ -26,4 +26,4 @@
0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ...
"""
-DSUB_VERSION = '0.4.5.dev0'
+DSUB_VERSION = '0.4.5' |
a2e63f05d7992058b09a3d8e72b91e022cb94ef1 | core/urls.py | core/urls.py | from django.conf.urls import include, url
from django.views.generic import TemplateView
from tastypie.api import Api
from .api import ImageResource, ThumbnailResource, PinResource, UserResource
v1_api = Api(api_name='v1')
v1_api.register(ImageResource())
v1_api.register(ThumbnailResource())
v1_api.register(PinResource())
v1_api.register(UserResource())
urlpatterns = [
url(r'^api/', include(v1_api.urls, namespace='api')),
url(r'^pins/pin-form/$', TemplateView.as_view(template_name='core/pin_form.html'),
name='pin-form'),
url(r'^pins/tags/(?P<tag>(\w|-)+)/$', TemplateView.as_view(template_name='core/pins.html'),
name='tag-pins'),
url(r'^pins/users/(?P<user>(\w|-)+)/$', TemplateView.as_view(template_name='core/pins.html'),
name='user-pins'),
url(r'^(?P<pin>[0-9]+)/$', TemplateView.as_view(template_name='core/pins.html'),
name='recent-pins'),
url(r'^$', TemplateView.as_view(template_name='core/pins.html'),
name='recent-pins'),
]
| from django.conf.urls import include, url
from django.views.generic import TemplateView
from tastypie.api import Api
from .api import ImageResource, ThumbnailResource, PinResource, UserResource
v1_api = Api(api_name='v1')
v1_api.register(ImageResource())
v1_api.register(ThumbnailResource())
v1_api.register(PinResource())
v1_api.register(UserResource())
urlpatterns = [
url(r'^api/', include(v1_api.urls, namespace='api')),
url(r'^pins/pin-form/$', TemplateView.as_view(template_name='core/pin_form.html'),
name='pin-form'),
url(r'^pins/tags/(?P<tag>(\w|-)+)/$', TemplateView.as_view(template_name='core/pins.html'),
name='tag-pins'),
url(r'^pins/users/(?P<user>(\w|-)+)/$', TemplateView.as_view(template_name='core/pins.html'),
name='user-pins'),
url(r'^(?P<pin>[0-9]+)/$', TemplateView.as_view(template_name='core/pins.html'),
name='pin-detail'),
url(r'^$', TemplateView.as_view(template_name='core/pins.html'),
name='recent-pins'),
]
| Correct the name for specified pin | Fix: Correct the name for specified pin
| Python | bsd-2-clause | pinry/pinry,lapo-luchini/pinry,lapo-luchini/pinry,pinry/pinry,pinry/pinry,pinry/pinry,lapo-luchini/pinry,lapo-luchini/pinry | ---
+++
@@ -22,7 +22,7 @@
url(r'^pins/users/(?P<user>(\w|-)+)/$', TemplateView.as_view(template_name='core/pins.html'),
name='user-pins'),
url(r'^(?P<pin>[0-9]+)/$', TemplateView.as_view(template_name='core/pins.html'),
- name='recent-pins'),
+ name='pin-detail'),
url(r'^$', TemplateView.as_view(template_name='core/pins.html'),
name='recent-pins'),
] |
755d8f6438ac8f7c3da6c5959e54915e7835b483 | run.py | run.py | #!venv/bin/python
from app import telomere
telomere.run(host='0.0.0.0', debug=True)
| #!/usr/bin/env python
from app import telomere
telomere.run(host='0.0.0.0', debug=True)
| Change to a better shebang | Change to a better shebang
| Python | mit | rabramley/telomere,rabramley/telomere,rabramley/telomere | ---
+++
@@ -1,4 +1,4 @@
-#!venv/bin/python
+#!/usr/bin/env python
from app import telomere
telomere.run(host='0.0.0.0', debug=True)
|
9a5dc452f181fac45a8c0efcd3f70e116efefc2a | erpnext/patches/v7_0/setup_account_table_for_expense_claim_type_if_exists.py | erpnext/patches/v7_0/setup_account_table_for_expense_claim_type_if_exists.py | from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("hr", "doctype", "expense_claim_type")
for expense_claim_type in frappe.get_all("Expense Claim Type", fields=["name", "default_account"]):
if expense_claim_type.default_account:
doc = frappe.get_doc("Expense Claim Type", expense_claim_type.name)
doc.append("accounts", {
"company": frappe.db.get_value("Account", expense_claim_type.default_account, "company"),
"default_account": expense_claim_type.default_account,
})
doc.save(ignore_permissions=True) | from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("hr", "doctype", "expense_claim_type")
frappe.reload_doc("hr", "doctype", "expense_claim_account")
for expense_claim_type in frappe.get_all("Expense Claim Type", fields=["name", "default_account"]):
if expense_claim_type.default_account:
doc = frappe.get_doc("Expense Claim Type", expense_claim_type.name)
doc.append("accounts", {
"company": frappe.db.get_value("Account", expense_claim_type.default_account, "company"),
"default_account": expense_claim_type.default_account,
})
doc.save(ignore_permissions=True) | Patch fixed for expense claim type | Patch fixed for expense claim type
| Python | agpl-3.0 | njmube/erpnext,indictranstech/erpnext,geekroot/erpnext,geekroot/erpnext,gsnbng/erpnext,Aptitudetech/ERPNext,indictranstech/erpnext,njmube/erpnext,indictranstech/erpnext,njmube/erpnext,geekroot/erpnext,geekroot/erpnext,indictranstech/erpnext,njmube/erpnext,gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext | ---
+++
@@ -3,6 +3,7 @@
def execute():
frappe.reload_doc("hr", "doctype", "expense_claim_type")
+ frappe.reload_doc("hr", "doctype", "expense_claim_account")
for expense_claim_type in frappe.get_all("Expense Claim Type", fields=["name", "default_account"]):
if expense_claim_type.default_account: |
d4f8746f6423c528009f02a4bdc193b311ccfd23 | Discord/tree.py | Discord/tree.py |
from discord import app_commands
from discord.ext import commands
import logging
import sys
import traceback
import sentry_sdk
class CommandTree(app_commands.CommandTree):
async def on_error(self, interaction, error):
if (
isinstance(error, app_commands.TransformerError) and
isinstance(
error.__cause__, commands.PartialEmojiConversionFailure
)
):
ctx = await interaction.client.get_context(interaction)
await ctx.embed_reply(
f"{ctx.bot.error_emoji} "
f"`{error.value}` doesn't seem to be a custom emoji"
)
return
sentry_sdk.capture_exception(error)
print(
f"Ignoring exception in slash command {interaction.command.name}",
# TODO: Use full name
file = sys.stderr
)
traceback.print_exception(
type(error), error, error.__traceback__, file = sys.stderr
)
logging.getLogger("errors").error(
"Uncaught exception\n",
exc_info = (type(error), error, error.__traceback__)
)
|
from discord import app_commands
from discord.ext import commands
import logging
import sys
import traceback
import sentry_sdk
class CommandTree(app_commands.CommandTree):
async def on_error(self, interaction, error):
# Command Invoke Error
if isinstance(error, app_commands.CommandInvokeError):
# Bot missing permissions
if isinstance(error.original, commands.BotMissingPermissions):
bot = interaction.client
ctx = await bot.get_context(interaction)
missing_permissions = bot.inflect_engine.join([
f"`{permission}`"
for permission in error.original.missing_permissions
])
permission_declension = bot.inflect_engine.plural(
'permission', len(error.original.missing_permissions)
)
await ctx.embed_reply(
"I don't have permission to do that here\n"
f"I need the {missing_permissions} {permission_declension}"
)
return
if (
isinstance(error, app_commands.TransformerError) and
isinstance(
error.__cause__, commands.PartialEmojiConversionFailure
)
):
ctx = await interaction.client.get_context(interaction)
await ctx.embed_reply(
f"{ctx.bot.error_emoji} "
f"`{error.value}` doesn't seem to be a custom emoji"
)
return
sentry_sdk.capture_exception(error)
print(
f"Ignoring exception in slash command {interaction.command.name}",
# TODO: Use full name
file = sys.stderr
)
traceback.print_exception(
type(error), error, error.__traceback__, file = sys.stderr
)
logging.getLogger("errors").error(
"Uncaught exception\n",
exc_info = (type(error), error, error.__traceback__)
)
| Handle bot missing permissions for app commands | [Discord] Handle bot missing permissions for app commands
| Python | mit | Harmon758/Harmonbot,Harmon758/Harmonbot | ---
+++
@@ -12,6 +12,25 @@
class CommandTree(app_commands.CommandTree):
async def on_error(self, interaction, error):
+ # Command Invoke Error
+ if isinstance(error, app_commands.CommandInvokeError):
+ # Bot missing permissions
+ if isinstance(error.original, commands.BotMissingPermissions):
+ bot = interaction.client
+ ctx = await bot.get_context(interaction)
+ missing_permissions = bot.inflect_engine.join([
+ f"`{permission}`"
+ for permission in error.original.missing_permissions
+ ])
+ permission_declension = bot.inflect_engine.plural(
+ 'permission', len(error.original.missing_permissions)
+ )
+ await ctx.embed_reply(
+ "I don't have permission to do that here\n"
+ f"I need the {missing_permissions} {permission_declension}"
+ )
+ return
+
if (
isinstance(error, app_commands.TransformerError) and
isinstance( |
b89982f7b66b46f4338ff2758219d7419e36d6ba | lms/djangoapps/api_manager/management/commands/migrate_orgdata.py | lms/djangoapps/api_manager/management/commands/migrate_orgdata.py | import json
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from api_manager.models import GroupProfile, Organization
class Command(BaseCommand):
"""
Migrates legacy organization data and user relationships from older Group model approach to newer concrete Organization model
"""
def handle(self, *args, **options):
org_groups = GroupProfile.objects.filter(group_type='organization')
for org in org_groups:
data = json.loads(org.data)
migrated_org = Organization.objects.create(
name=data['name'],
display_name=data['display_name'],
contact_name=data['contact_name'],
contact_email=data['contact_email'],
contact_phone=data['contact_phone']
)
group = Group.objects.get(groupprofile=org.id)
users = group.user_set.all()
for user in users:
migrated_org.users.add(user)
linked_groups = group.grouprelationship.get_linked_group_relationships()
for linked_group in linked_groups:
if linked_group.to_group_relationship_id is not org.id: # Don't need to carry the symmetrical component
actual_group = Group.objects.get(id=linked_group.to_group_relationship_id)
migrated_org.groups.add(actual_group)
| import json
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from api_manager.models import GroupProfile, Organization
class Command(BaseCommand):
"""
Migrates legacy organization data and user relationships from older Group model approach to newer concrete Organization model
"""
def handle(self, *args, **options):
org_groups = GroupProfile.objects.filter(group_type='organization')
for org in org_groups:
data = json.loads(org.data)
name = org.name
display_name = data.get('display_name', name)
contact_name = data.get('contact_name', None)
contact_email = data.get('email', None)
if contact_email is None:
contact_email = data.get('contact_email', None)
contact_phone = data.get('phone', None)
if contact_phone is None:
contact_phone = data.get('contact_phone', None)
migrated_org = Organization.objects.create(
name=name,
display_name=display_name,
contact_name=contact_name,
contact_email=contact_email,
contact_phone=contact_phone
)
group = Group.objects.get(groupprofile=org.id)
users = group.user_set.all()
for user in users:
migrated_org.users.add(user)
linked_groups = group.grouprelationship.get_linked_group_relationships()
for linked_group in linked_groups:
if linked_group.to_group_relationship_id is not org.id: # Don't need to carry the symmetrical component
actual_group = Group.objects.get(id=linked_group.to_group_relationship_id)
migrated_org.groups.add(actual_group)
| Tweak to migration in order to accomodate old names for data fields and allow for if data fields were not present | Tweak to migration in order to accomodate old names for data fields and allow for if data fields were not present
| Python | agpl-3.0 | edx-solutions/edx-platform,edx-solutions/edx-platform,edx-solutions/edx-platform,edx-solutions/edx-platform | ---
+++
@@ -16,12 +16,23 @@
for org in org_groups:
data = json.loads(org.data)
+
+ name = org.name
+ display_name = data.get('display_name', name)
+ contact_name = data.get('contact_name', None)
+ contact_email = data.get('email', None)
+ if contact_email is None:
+ contact_email = data.get('contact_email', None)
+ contact_phone = data.get('phone', None)
+ if contact_phone is None:
+ contact_phone = data.get('contact_phone', None)
+
migrated_org = Organization.objects.create(
- name=data['name'],
- display_name=data['display_name'],
- contact_name=data['contact_name'],
- contact_email=data['contact_email'],
- contact_phone=data['contact_phone']
+ name=name,
+ display_name=display_name,
+ contact_name=contact_name,
+ contact_email=contact_email,
+ contact_phone=contact_phone
)
group = Group.objects.get(groupprofile=org.id)
users = group.user_set.all() |
abe1f4fbc846b51f1069f3e7da48524876f2caf6 | blackbelt/slack.py | blackbelt/slack.py | from slacker import Slacker
from blackbelt.config import config
class Slack(object):
def __init__(self, token=None):
if not token:
token = config['slack']['access_token']
slack = Slacker(token)
self.slack = slack
if not token:
raise ValueError("Can't do things with Slack without access token. Run bb init.")
self.token = token
def get_user_id(self):
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":black_joker:")
def post_message(message, room='#sre'):
client = Slack()
msg = "<@%s> %s" % (client.get_user_id(), message)
client.post_message(msg, room)
| from slacker import Slacker
from blackbelt.config import config
class Slack(object):
def __init__(self, token=None):
if not token:
token = config['slack']['access_token']
slack = Slacker(token)
self.slack = slack
if not token:
raise ValueError("Can't do things with Slack without access token. Run bb init.")
self.token = token
def get_user_id(self):
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":blackbelt:")
def post_message(message, room='#sre'):
client = Slack()
msg = "<@%s> %s" % (client.get_user_id(), message)
client.post_message(msg, room)
| Add custom emoji for blackbelt | Add custom emoji for blackbelt
| Python | mit | apiaryio/black-belt | ---
+++
@@ -16,7 +16,7 @@
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
- return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":black_joker:")
+ return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":blackbelt:")
def post_message(message, room='#sre'): |
3d1edf98284979ae8eade5804e44fb312bece1b0 | sir/schema/__init__.py | sir/schema/__init__.py | # Copyright (c) 2014 Wieland Hoffmann
# License: MIT, see LICENSE for details
from mbdata import models
from . import modelext
from .searchentities import SearchEntity as E, SearchField as F
# Search entity for recordings: each F() maps a search-index field name to an
# attribute path on the mbdata-based ORM model.
SearchRecording = E(modelext.CustomRecording, [
    F("mbid", "gid"),
    F("recording", "name"),
    F("arid", "artist_credit.artists.artist.gid"),
#    F("artistname", ["artist_credit.name",
#                     "artist_credit.artists.artist.name"]),
    F("reid", "tracks.medium.release.gid"),
    F("release", "tracks.medium.release.name"),
    F("comment", "comment"),
    F("dur", "length"),
    F("video", "video")
])
# Search entity for release groups.
SearchReleaseGroup = E(modelext.CustomReleaseGroup, [
    F("mbid", "gid"),
    F("release-group", "name"),
#    F("release", "releases.name"),
    F("reid", "releases.gid"),
])
# Maps public entity names to their search-entity definitions.
SCHEMA = {
    "recording": SearchRecording,
    "release-group": SearchReleaseGroup
}
| # Copyright (c) 2014 Wieland Hoffmann
# License: MIT, see LICENSE for details
from mbdata import models
from . import modelext
from .searchentities import SearchEntity as E, SearchField as F
# Search entity for recordings: each F() maps a search-index field name to an
# attribute path on the mbdata-based ORM model.
SearchRecording = E(modelext.CustomRecording, [
    F("mbid", "gid"),
    F("recording", "name"),
    F("arid", "artist_credit.artists.artist.gid"),
#    F("artistname", ["artist_credit.name",
#                     "artist_credit.artists.artist.name"]),
    F("reid", "tracks.medium.release.gid"),
    F("release", "tracks.medium.release.name"),
    F("comment", "comment"),
    F("dur", "length"),
    F("video", "video")
])
# Search entity for release groups; "releases" indexes the release count via
# the len() transform applied to the list of release gids.
SearchReleaseGroup = E(modelext.CustomReleaseGroup, [
    F("mbid", "gid"),
    F("release-group", "name"),
#    F("release", "releases.name"),
    F("reid", "releases.gid"),
    F("releases", "releases.gid", transformfunc=len),
    F("credit-name", "artist_credit.artists.artist.name")
])
# Maps public entity names to their search-entity definitions.
SCHEMA = {
    "recording": SearchRecording,
    "release-group": SearchReleaseGroup
}
| Add missing release group fields | schema: Add missing release group fields
| Python | mit | jeffweeksio/sir | ---
+++
@@ -23,6 +23,8 @@
F("release-group", "name"),
# F("release", "releases.name"),
F("reid", "releases.gid"),
+ F("releases", "releases.gid", transformfunc=len),
+ F("credit-name", "artist_credit.artists.artist.name")
])
|
3141b73ddeb14184062ab5ff4436ebb87de02253 | dayonetools/services/__init__.py | dayonetools/services/__init__.py | """Common services code"""
from datetime import datetime
import importlib
# Service sub-modules shipped with dayonetools, and their parent package name.
AVAILABLE_SERVICES = ['habit_list', 'idonethis', 'nikeplus']
SERVICES_PKG = 'dayonetools.services'
def get_service_module(service_name):
    """Import given service from dayonetools.services package"""
    # e.g. 'habit_list' -> the dayonetools.services.habit_list module object
    module = '%s.%s' % (SERVICES_PKG, service_name)
    return importlib.import_module(module)
def convert_to_dayone_date_string(date):
    """
    Convert given date in 'yyyy-mm-dd' format into dayone accepted format of
    iso8601

    The timestamp will match the current time but year, month, and day will
    be replaced with given arguments.
    """
    year, month, day = date.split('-')
    now = datetime.utcnow()
    # Dayone doesn't read entries correctly when date has a ms component
    ms = 0
    # Note: rebinds the `date` parameter to the datetime built from it.
    date = now.replace(year=int(year),
                       month=int(month),
                       day=int(day),
                       microsecond=ms)
    iso_string = date.isoformat()
    # Very specific format for dayone, if the 'Z' is not in the
    # correct positions the entries will not show up in dayone at all.
    return iso_string + 'Z'
# Make all services available from this level.  Each service module is
# imported for its side effects; `service` is rebound every iteration and
# only the last binding survives at module level.
for service_name in AVAILABLE_SERVICES:
    service = get_service_module(service_name)
| """Common services code"""
# Service sub-modules shipped with dayonetools; imported in the loop below.
AVAILABLE_SERVICES = ['habit_list', 'idonethis', 'nikeplus']
def get_service_module(service_name):
    """Return the imported ``dayonetools.services.<service_name>`` module."""
    import importlib
    # Build the dotted module path and hand it to importlib.
    full_name = '%s.%s' % ('dayonetools.services', service_name)
    return importlib.import_module(full_name)
def convert_to_dayone_date_string(date):
    """Return an iso8601 string Day One accepts for a 'yyyy-mm-dd' date.

    The hour/minute/second come from the current UTC time; the year, month,
    and day are taken from *date*, and microseconds are zeroed because Day
    One cannot parse timestamps that carry a microsecond component.
    """
    from datetime import datetime
    y, m, d = (int(part) for part in date.split('-'))
    stamp = datetime.utcnow().replace(year=y, month=m, day=d, microsecond=0)
    # Day One requires the trailing 'Z'; without it in exactly this position
    # the entries do not show up in the app at all.
    return stamp.isoformat() + 'Z'
# Make all services available from this level.  Each service module is
# imported for its side effects; `service` is rebound every iteration and
# only the last binding survives at module level.
for service_name in AVAILABLE_SERVICES:
    service = get_service_module(service_name)
| Move some imports internal so importing services package is not so cluttered | Move some imports internal so importing services package is not so cluttered
| Python | mit | durden/dayonetools | ---
+++
@@ -1,16 +1,15 @@
"""Common services code"""
+AVAILABLE_SERVICES = ['habit_list', 'idonethis', 'nikeplus']
-from datetime import datetime
-import importlib
-
-AVAILABLE_SERVICES = ['habit_list', 'idonethis', 'nikeplus']
-SERVICES_PKG = 'dayonetools.services'
def get_service_module(service_name):
"""Import given service from dayonetools.services package"""
- module = '%s.%s' % (SERVICES_PKG, service_name)
+ import importlib
+ services_pkg = 'dayonetools.services'
+
+ module = '%s.%s' % (services_pkg, service_name)
return importlib.import_module(module)
@@ -24,6 +23,8 @@
"""
year, month, day = date.split('-')
+
+ from datetime import datetime
now = datetime.utcnow()
# Dayone doesn't read entries correctly when date has a ms component |
8a966a2cf96009d078ee0ba6fa020e2de782ae7a | brewdata/__init__.py | brewdata/__init__.py | #!/usr/bin/env python
import os
def where():
    """
    Return the installation location of BrewData
    """
    f = os.path.split(__file__)[0]
    return os.path.abspath(f)
def cereals():
    # Path to the bundled cereal data directory.
    return(os.path.join(where(), 'cereals'))
def hops():
    # Path to the bundled hop data directory.
    return(os.path.join(where(), 'hops'))
def yeast():
    # Path to the bundled yeast data directory.
    return(os.path.join(where(), 'yeast'))
if __name__ == '__main__':
    print(where())
| #!/usr/bin/env python
import os
def where():
    """
    Return the installation location of BrewData
    """
    f = os.path.split(__file__)[0]
    return os.path.abspath(f)
def cereals():
    # Path to the bundled cereal data directory.
    # NOTE(review): where() returns a (unicode) str while the component here
    # is a bytes literal; on Python 3 os.path.join() raises TypeError when
    # mixing str and bytes -- confirm this module targets Python 2 only.
    return(os.path.join(where(), b'cereals'))
def hops():
    # Path to the bundled hop data directory (same str/bytes caveat as above).
    return(os.path.join(where(), b'hops'))
def yeast():
    # Path to the bundled yeast data directory (same str/bytes caveat as above).
    return(os.path.join(where(), b'yeast'))
if __name__ == '__main__':
    print(where())
| Set path names to be byte strings | Set path names to be byte strings
| Python | mit | chrisgilmerproj/brewdata,chrisgilmerproj/brewdata | ---
+++
@@ -12,15 +12,15 @@
def cereals():
- return(os.path.join(where(), 'cereals'))
+ return(os.path.join(where(), b'cereals'))
def hops():
- return(os.path.join(where(), 'hops'))
+ return(os.path.join(where(), b'hops'))
def yeast():
- return(os.path.join(where(), 'yeast'))
+ return(os.path.join(where(), b'yeast'))
if __name__ == '__main__': |
93ea521678c283d8211aed9ca88db6c5dc068362 | process_pic.py | process_pic.py | from pic import Picture
from path import Path
import argparse, json
# Command-line interface: positional picture/directory path plus an optional
# face-count expectation.
parser = argparse.ArgumentParser(
    description=
    "Process the picture or the directory, given the json config file")
parser.add_argument("path", help="Path for the picture or the directory")
parser.add_argument(
    "-n",
    "--nb_faces",
    type=int,
    help=
    "0 | 1 | n to don't search for any face | search and expect exactly one face | search for multiple face"
)
def process(dir_path, nb_faces=1, margin=0.4):
    # TODO: unfinished stub; currently only echoes the directory path.
    # NOTE(review): the local name `dir` shadows the builtin of the same name.
    dir = Path(dir_path)
    print(dir)
if __name__ == '__main__':
    args = parser.parse_args()
    print(args)
| from pic import Picture
from path import Path
import argparse
# Command-line interface: positional picture/directory path plus optional
# face-count, crop margin, and a JSON config file that can supply defaults.
parser = argparse.ArgumentParser(
    description=
    "Process the picture or the directory, given the json config file")
parser.add_argument("path", help="Path for the picture or the directory")
parser.add_argument(
    "-n",
    "--nb_faces",
    type=int,
    help=
    "0 | 1 | n to don't search for any face | search and expect exactly one face | search for multiple face"
)
parser.add_argument(
    "--margin",
    "-m",
    type=float,
    help="Specify the margin around the face if the face_crop is activate")
parser.add_argument("--json", "-j", help="Path to a config file")
def load_json_config(path):
    """Read the JSON config file at *path* and return the parsed object."""
    import json
    raw = Path(path).text()
    return json.loads(raw)
def process_pic(path, nb_faces=1, face_crop=False, margin=0.4):
    # TODO: implement the actual picture processing; currently a stub that
    # only wraps the path (the Path object is unused).
    file_path = Path(path)
    pass
def merge(dict_1, dict_2):
    """Combine two dicts into a new dict keyed by ``str(key)``.

    For every key present in either input, the result holds ``dict_1``'s
    value when it is truthy, otherwise ``dict_2``'s; a falsy value only
    survives when both sides are falsy.
    """
    all_keys = set(dict_1) | set(dict_2)
    return {str(k): dict_1.get(k) or dict_2.get(k) for k in all_keys}
if __name__ == '__main__':
    args = parser.parse_args()
    # Load defaults from the JSON config when one was supplied.
    if args.json:
        config = load_json_config(args.json)
    else:
        config = {}
    # Shell options take priority over the JSON config (merge favors args).
    config = merge(vars(args), config)
| Add a function to read json config files | Add a function to read json config files
| Python | mit | Dixneuf19/fuzzy-octo-disco | ---
+++
@@ -1,6 +1,6 @@
from pic import Picture
from path import Path
-import argparse, json
+import argparse
parser = argparse.ArgumentParser(
description=
"Process the picture or the directory, given the json config file")
@@ -12,13 +12,42 @@
help=
"0 | 1 | n to don't search for any face | search and expect exactly one face | search for multiple face"
)
+parser.add_argument(
+ "--margin",
+ "-m",
+ type=float,
+ help="Specify the margin around the face if the face_crop is activate")
+parser.add_argument("--json", "-j", help="Path to a config file")
-def process(dir_path, nb_faces=1, margin=0.4):
- dir = Path(dir_path)
- print(dir)
+def load_json_config(path):
+ """
+ Import a config file written in json
+ """
+ import json
+ # Pretend that we load the following JSON file:
+ return json.loads(Path(path).text())
+
+
+def process_pic(path, nb_faces=1, face_crop=False, margin=0.4):
+ file_path = Path(path)
+ pass
+
+
+def merge(dict_1, dict_2):
+ """Merge two dictionaries.
+ Values that evaluate to true take priority over falsy values.
+ `dict_1` takes priority over `dict_2`.
+ """
+ return dict((str(key), dict_1.get(key) or dict_2.get(key))
+ for key in set(dict_2) | set(dict_1))
if __name__ == '__main__':
args = parser.parse_args()
- print(args)
+ if args.json:
+ config = load_json_config(args.json)
+ else:
+ config = {}
+ # Shell options takes priority over json config
+ config = merge(vars(args), config) |
4218d2a4d711f5a516423b1f4aa2685fc5a456b3 | app/brain/user_management/account_updater.py | app/brain/user_management/account_updater.py | from flask_login import current_user
from app.brain.user_management.change_password_result import ChangePasswordResult
from app.brain.utilities import hash_password
class AccountUpdater(object):
    """Operations that modify the currently logged-in user's account."""
    @staticmethod
    def change_password(old_password, new_password, confirm_password):
        """Validate a change-password request for the current user.

        Returns a ChangePasswordResult on failure; the success branch is
        still a TODO stub.
        """
        if new_password != confirm_password:
            return ChangePasswordResult.NEW_PASSWORDS_DO_NOT_MATCH
        elif not current_user.is_authenticated:
            # Somehow the user isn't logged in --- this should never happen
            return ChangePasswordResult.CURRENT_PASSWORD_INCORRECT
        elif hash_password(old_password) != current_user.password:
            return ChangePasswordResult.CURRENT_PASSWORD_INCORRECT
        else:
            # If we get to here, we go ahead and change the password
            # TODO: This logic needs to be updated
            pass
| from flask_login import current_user
from app.brain.user_management.change_password_result import ChangePasswordResult
from app.brain.utilities import hash_password
from app.service import UsersService
class AccountUpdater(object):
    """Operations that modify the currently logged-in user's account."""
    @staticmethod
    def change_password(old_password, new_password, confirm_password):
        """Validate the request and change the current user's password.

        Returns a ChangePasswordResult on failure; on success delegates the
        actual update to UsersService.change_password.
        NOTE(review): the success branch implicitly returns None -- callers
        must treat None as success.
        """
        if new_password != confirm_password:
            return ChangePasswordResult.NEW_PASSWORDS_DO_NOT_MATCH
        elif not current_user.is_authenticated:
            # Somehow the user isn't logged in --- this should never happen
            return ChangePasswordResult.CURRENT_PASSWORD_INCORRECT
        elif hash_password(old_password) != current_user.password:
            return ChangePasswordResult.CURRENT_PASSWORD_INCORRECT
        else:
            # If we get to here, we go ahead and change the password
            UsersService.change_password(current_user, hash_password(new_password))
| Put in call to to UsersService to actually change password | Put in call to to UsersService to actually change password
| Python | mit | pbraunstein/trackercise,pbraunstein/trackercise,pbraunstein/trackercise,pbraunstein/trackercise,pbraunstein/trackercise | ---
+++
@@ -2,6 +2,7 @@
from app.brain.user_management.change_password_result import ChangePasswordResult
from app.brain.utilities import hash_password
+from app.service import UsersService
class AccountUpdater(object):
@@ -16,5 +17,4 @@
return ChangePasswordResult.CURRENT_PASSWORD_INCORRECT
else:
# If we get to here, we go ahead and change the password
- # TODO: This logic needs to be updated
- pass
+ UsersService.change_password(current_user, hash_password(new_password)) |
88238ca8a0c3169e9350434ecd517eafdb118b88 | pybinding/greens.py | pybinding/greens.py | import numpy as np
import _pybinding
from .results import LDOSpoint
__all__ = ['Greens', 'make_kpm']
class Greens:
    """Python-side wrapper around a C++ `_pybinding.Greens` implementation."""
    def __init__(self, impl: _pybinding.Greens):
        self.impl = impl
    def calc_ldos(self, energy: np.ndarray, broadening: float, position: tuple, sublattice: int=-1):
        """Compute the local density of states at `position`, wrapped in LDOSpoint."""
        return LDOSpoint(energy, self.impl.calc_ldos(energy, broadening, position, sublattice))
def make_kpm(model, lambda_value=4.0, energy_range=(0.0, 0.0)):
    """Return a Greens wrapper backed by the kernel polynomial method (KPM)."""
    return Greens(_pybinding.KPM(model, lambda_value, energy_range))
| import numpy as np
import _pybinding
from .results import LDOSpoint
__all__ = ['Greens', 'make_kpm']
class Greens:
    """Python-side wrapper around a C++ `_pybinding.Greens` implementation."""
    def __init__(self, impl: _pybinding.Greens):
        self.impl = impl
    def report(self, shortform=False):
        """Delegate to the implementation's report(); `shortform` picks the brief form."""
        return self.impl.report(shortform)
    def calc_ldos(self, energy: np.ndarray, broadening: float, position: tuple, sublattice: int=-1):
        """Compute the local density of states at `position`, wrapped in LDOSpoint."""
        return LDOSpoint(energy, self.impl.calc_ldos(energy, broadening, position, sublattice))
def make_kpm(model, lambda_value=4.0, energy_range=(0.0, 0.0)):
    """Return a Greens wrapper backed by the kernel polynomial method (KPM)."""
    return Greens(_pybinding.KPM(model, lambda_value, energy_range))
| Add report() method to Greens | Add report() method to Greens
| Python | bsd-2-clause | dean0x7d/pybinding,MAndelkovic/pybinding,MAndelkovic/pybinding,dean0x7d/pybinding,MAndelkovic/pybinding,dean0x7d/pybinding | ---
+++
@@ -10,6 +10,9 @@
def __init__(self, impl: _pybinding.Greens):
self.impl = impl
+ def report(self, shortform=False):
+ return self.impl.report(shortform)
+
def calc_ldos(self, energy: np.ndarray, broadening: float, position: tuple, sublattice: int=-1):
return LDOSpoint(energy, self.impl.calc_ldos(energy, broadening, position, sublattice))
|
63cf6f6228490c9944711106cd134254eafac3ca | regressors/plots.py | regressors/plots.py | # -*- coding: utf-8 -*-
"""This module contains functions for making plots relevant to regressors."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
| # -*- coding: utf-8 -*-
"""This module contains functions for making plots relevant to regressors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import matplotlib.pyplot as plt
import seaborn.apionly as sns
from sklearn import linear_model as lm
from regressors import stats
# Linear-model classes that plot_residuals() knows how to handle.
supported_linear_models = (lm.LinearRegression, lm.Lasso, lm.Ridge,
                           lm.ElasticNet)
def plot_residuals(clf, X, y, r_type='standardized', figsize=(10, 10)):
    """Plot residuals of a linear model.

    Parameters
    ----------
    clf : sklearn.linear_model
        A scikit-learn linear model classifier with a `predict()` method.
    X : numpy.ndarray
        Training data used to fit the classifier.
    y : numpy.ndarray
        Target training values, of shape = [n_samples].
    r_type : str
        Type of residuals to return: ['raw', 'standardized', 'studentized'].
        Defaults to 'standardized'.

        * 'raw' will return the raw residuals.
        * 'standardized' will return the standardized residuals, also known as
          internally studentized residuals.
        * 'studentized' will return the externally studentized residuals.
    figsize : tuple
        A tuple indicating the size of the plot to be created, with format
        (x-axis, y-axis). Defaults to (10, 10).

    Returns
    -------
    matplotlib.figure.Figure
        The Figure instance.
    """
    # Ensure we only plot residuals using classifiers we have tested.
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would validate unconditionally (kept as-is to preserve the interface).
    assert isinstance(clf, supported_linear_models), (
        "Classifiers of type {} not currently supported.".format(type(clf))
    )
    # With sns, only use their API so you don't change user stuff
    sns.set_context("talk")  # Increase font size on plot
    sns.set_style("whitegrid")
    # Get residuals or standardized residuals
    resids = stats.residuals(clf, X, y, r_type)
    predictions = clf.predict(X)
    # Generate residual plot
    y_label = {'raw': 'Residuals', 'standardized': 'Standardized Residuals',
               'studentized': 'Studentized Residuals'}
    # NOTE(review): the named figure 'residuals' is reused across calls, and
    # the +/-100 padding below assumes predictions on a scale where 100 is a
    # reasonable margin -- confirm for small-scale targets.
    fig = plt.figure('residuals', figsize=figsize)
    plt.scatter(predictions, resids, s=14, c='gray', alpha=0.7)
    plt.hlines(y=0, xmin=predictions.min() - 100, xmax=predictions.max() + 100,
               linestyle='dotted')
    plt.title("Residuals Plot")
    plt.xlabel("Predictions")
    plt.ylabel(y_label[r_type])
    plt.show()
    return fig
| Add function to make residuals plot | Add function to make residuals plot
| Python | isc | nsh87/regressors | ---
+++
@@ -1,9 +1,69 @@
# -*- coding: utf-8 -*-
"""This module contains functions for making plots relevant to regressors."""
+from __future__ import absolute_import
+from __future__ import division
from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
from __future__ import unicode_literals
+import matplotlib.pyplot as plt
+import seaborn.apionly as sns
+from sklearn import linear_model as lm
+from regressors import stats
+
+supported_linear_models = (lm.LinearRegression, lm.Lasso, lm.Ridge,
+ lm.ElasticNet)
+
+
+def plot_residuals(clf, X, y, r_type='standardized', figsize=(10, 10)):
+ """Plot residuals of a linear model.
+
+ Parameters
+ ----------
+ clf : sklearn.linear_model
+ A scikit-learn linear model classifier with a `predict()` method.
+ X : numpy.ndarray
+ Training data used to fit the classifier.
+ y : numpy.ndarray
+ Target training values, of shape = [n_samples].
+ r_type : str
+ Type of residuals to return: ['raw', 'standardized', 'studentized'].
+ Defaults to 'standardized'.
+
+ * 'raw' will return the raw residuals.
+ * 'standardized' will return the standardized residuals, also known as
+ internally studentized residuals.
+ * 'studentized' will return the externally studentized residuals.
+ figsize : tuple
+ A tuple indicating the size of the plot to be created, with format
+ (x-axis, y-axis). Defaults to (10, 10).
+
+ Returns
+ -------
+ matplotlib.figure.Figure
+ The Figure instance.
+ """
+ # Ensure we only plot residuals using classifiers we have tested
+ assert isinstance(clf, supported_linear_models), (
+ "Classifiers of type {} not currently supported.".format(type(clf))
+ )
+ # With sns, only use their API so you don't change user stuff
+ sns.set_context("talk") # Increase font size on plot
+ sns.set_style("whitegrid")
+ # Get residuals or standardized residuals
+ resids = stats.residuals(clf, X, y, r_type)
+ predictions = clf.predict(X)
+ # Generate residual plot
+ y_label = {'raw': 'Residuals', 'standardized': 'Standardized Residuals',
+ 'studentized': 'Studentized Residuals'}
+ fig = plt.figure('residuals', figsize=figsize)
+ plt.scatter(predictions, resids, s=14, c='gray', alpha=0.7)
+ plt.hlines(y=0, xmin=predictions.min() - 100, xmax=predictions.max() + 100,
+ linestyle='dotted')
+ plt.title("Residuals Plot")
+ plt.xlabel("Predictions")
+ plt.ylabel(y_label[r_type])
+ plt.show()
+ return fig
+ |
46d2b85e7df1b8e247a9ab09ea390de15cc84fb6 | CodeFights/sumUpNumbers.py | CodeFights/sumUpNumbers.py | #!/usr/local/bin/python
# Code Fights Sum Up Problem
import re
def sumUpNumbers(inputString):
    # Sum every maximal run of consecutive digits found in the string.
    return sum([int(n) for n in re.findall(r'\d+', inputString)])
def main():
    # Each entry pairs an input string with its expected digit-run sum.
    tests = [
        ["2 apples, 12 oranges", 14],
        ["123450", 123450],
        ["Your payment method is invalid", 0]
    ]
    for t in tests:
        res = sumUpNumbers(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: sumUpNumbers({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: sumUpNumbers({}) returned {}, answer: {}"
                  .format(t[0], res, ans))
if __name__ == '__main__':
    main()
| #!/usr/local/bin/python
# Code Fights Sum Up Numbers Problem
import re
def sumUpNumbers(inputString):
    """Sum every maximal run of consecutive digits found in *inputString*."""
    total = 0
    for digits in re.findall(r'\d+', inputString):
        total += int(digits)
    return total
def main():
    # Each entry pairs an input string with its expected digit-run sum.
    tests = [
        ["2 apples, 12 oranges", 14],
        ["123450", 123450],
        ["Your payment method is invalid", 0]
    ]
    for t in tests:
        res = sumUpNumbers(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: sumUpNumbers({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: sumUpNumbers({}) returned {}, answer: {}"
                  .format(t[0], res, ans))
if __name__ == '__main__':
    main()
| Fix missing word in sum up numbers problem | Fix missing word in sum up numbers problem
| Python | mit | HKuz/Test_Code | ---
+++
@@ -1,5 +1,5 @@
#!/usr/local/bin/python
-# Code Fights Sum Up Problem
+# Code Fights Sum Up Numbers Problem
import re
|
61cebe12c001bb42350d8e9e99a7fa7d26fc7667 | openedx/stanford/lms/lib/courseware_search/lms_filter_generator.py | openedx/stanford/lms/lib/courseware_search/lms_filter_generator.py | """
Custom override of SearchFilterGenerator to use course tiles for
discovery search.
"""
from search.filter_generator import SearchFilterGenerator
from branding_stanford.models import TileConfiguration
from lms.lib.courseware_search.lms_filter_generator import LmsSearchFilterGenerator
class TileSearchFilterGenerator(LmsSearchFilterGenerator):
    """
    SearchFilterGenerator for LMS Search.
    """
    def field_dictionary(self, **kwargs):
        """
        Return field filter dictionary for search.

        For anonymous requests (no 'user' kwarg), restricts discovery search
        to the courses configured as tiles.
        """
        field_dictionary = super(TileSearchFilterGenerator, self).field_dictionary(**kwargs)
        if not kwargs.get('user'):
            # Adds tile courses for discovery search
            course_tiles_ids = TileConfiguration.objects.filter(
                enabled=True,
            ).values_list('course_id', flat=True).order_by('-change_date')
            field_dictionary['course'] = list(course_tiles_ids)
        return field_dictionary
| """
Custom override of SearchFilterGenerator to use course tiles for
discovery search.
"""
from search.filter_generator import SearchFilterGenerator
from branding_stanford.models import TileConfiguration
from lms.lib.courseware_search.lms_filter_generator import LmsSearchFilterGenerator
class TileSearchFilterGenerator(LmsSearchFilterGenerator):
    """
    SearchFilterGenerator for LMS Search.
    """
    def field_dictionary(self, **kwargs):
        """
        Return field filter dictionary for search.

        For anonymous requests (no 'user' kwarg), restricts discovery search
        to the courses configured as tiles, but only when at least one tile
        exists; otherwise the base (unfiltered) behaviour applies.
        """
        field_dictionary = super(TileSearchFilterGenerator, self).field_dictionary(**kwargs)
        if not kwargs.get('user'):
            # Adds tile courses for discovery search
            course_tiles_ids = TileConfiguration.objects.filter(
                enabled=True,
            ).values_list('course_id', flat=True).order_by('-change_date')
            courses = list(course_tiles_ids)
            # Only constrain the search when tiles are actually configured.
            if len(courses):
                field_dictionary['course'] = courses
        return field_dictionary
| Use stanford search logic only if configured | Use stanford search logic only if configured
| Python | agpl-3.0 | Stanford-Online/edx-platform,Stanford-Online/edx-platform,Stanford-Online/edx-platform,Stanford-Online/edx-platform | ---
+++
@@ -23,5 +23,7 @@
course_tiles_ids = TileConfiguration.objects.filter(
enabled=True,
).values_list('course_id', flat=True).order_by('-change_date')
- field_dictionary['course'] = list(course_tiles_ids)
+ courses = list(course_tiles_ids)
+ if len(courses):
+ field_dictionary['course'] = courses
return field_dictionary |
7e71e011fc4266b1edf21a0028cf878a71ab23fe | PyOpenWorm/plot.py | PyOpenWorm/plot.py | from PyOpenWorm import *
class Plot(DataObject):
    """
    Object for storing plot data in PyOpenWorm.
    Must be instantiated with a 2D list of coordinates.
    """
    def __init__(self, data=False, *args, **kwargs):
        DataObject.__init__(self, **kwargs)
        # _data_string holds the serialized form persisted to the database.
        Plot.DatatypeProperty('_data_string', self, multiple=False)
        if (isinstance(data, list)) and (isinstance(data[0], list)):
            # data is user-facing, _data_string is for db
            self._data_string(self._to_string(data))
            self.data = data
        else:
            raise ValueError('Plot must be instantiated with 2D list.')
    def _to_string(self, input_list):
        """
        Converts input_list to a string
        for serialized storage in PyOpenWorm.
        """
        return '|'.join([str(item) for item in input_list])
    def _to_list(self, input_string):
        """
        Converts from internal serialized string
        to a 2D list.
        """
        # NOTE(review): on Python 3 the map() below yields an iterator, not a
        # list, so each element of out_list is lazy -- confirm target version.
        out_list = []
        for pair_string in input_string.split('|'):
            pair_as_list = pair_string \
                .replace('[', '') \
                .replace(']', '') \
                .split(',')
            out_list.append(
                map(float, pair_as_list)
            )
        return out_list
| from PyOpenWorm import *
class Plot(DataObject):
    """
    Object for storing plot data in PyOpenWorm.
    Must be instantiated with a 2D list of coordinates.
    """
    def __init__(self, data=False, *args, **kwargs):
        DataObject.__init__(self, **kwargs)
        # _data_string holds the serialized form persisted to the database.
        Plot.DatatypeProperty('_data_string', self, multiple=False)
        if data:
            self.set_data(data)
    def _to_string(self, input_list):
        """
        Converts input_list to a string
        for serialized storage in PyOpenWorm.
        """
        return '|'.join([str(item) for item in input_list])
    def _to_list(self, input_string):
        """
        Converts from internal serialized string
        to a 2D list.
        """
        # NOTE(review): on Python 3 the map() below yields an iterator, not a
        # list, so each element of out_list is lazy -- confirm target version.
        out_list = []
        for pair_string in input_string.split('|'):
            pair_as_list = pair_string \
                .replace('[', '') \
                .replace(']', '') \
                .split(',')
            out_list.append(
                map(float, pair_as_list)
            )
        return out_list
    def set_data(self, data):
        """
        Set the data attribute, which is user-facing,
        as well as the serialized _data_string
        attribute, which is used for db storage.
        """
        try:
            # make sure we're dealing with a 2D list
            assert isinstance(data, list)
            assert isinstance(data[0], list)
            self._data_string(self._to_string(data))
            self.data = data
        except (AssertionError, IndexError):
            raise ValueError('Attribute "data" must be a 2D list of numbers.')
    def get_data(self):
        """
        Get the data stored for this plot.
        """
        if self._data_string():
            return self._to_list(self._data_string())
        else:
            raise AttributeError('You must call "set_data" first.')
| Add data setter and getter for Plot | Add data setter and getter for Plot
| Python | mit | gsarma/PyOpenWorm,openworm/PyOpenWorm,openworm/PyOpenWorm,gsarma/PyOpenWorm | ---
+++
@@ -12,13 +12,9 @@
DataObject.__init__(self, **kwargs)
Plot.DatatypeProperty('_data_string', self, multiple=False)
- if (isinstance(data, list)) and (isinstance(data[0], list)):
- # data is user-facing, _data_string is for db
- self._data_string(self._to_string(data))
- self.data = data
- else:
- raise ValueError('Plot must be instantiated with 2D list.')
-
+ if data:
+ self.set_data(data)
+
def _to_string(self, input_list):
"""
Converts input_list to a string
@@ -42,3 +38,26 @@
)
return out_list
+ def set_data(self, data):
+ """
+ Set the data attribute, which is user-facing,
+ as well as the serialized _data_string
+ attribute, which is used for db storage.
+ """
+ try:
+ # make sure we're dealing with a 2D list
+ assert isinstance(data, list)
+ assert isinstance(data[0], list)
+ self._data_string(self._to_string(data))
+ self.data = data
+ except (AssertionError, IndexError):
+ raise ValueError('Attribute "data" must be a 2D list of numbers.')
+
+ def get_data(self):
+ """
+ Get the data stored for this plot.
+ """
+ if self._data_string():
+ return self._to_list(self._data_string())
+ else:
+ raise AttributeError('You must call "set_data" first.') |
d080871e248cdf457fd1ff0023b5a651456d5b0b | openfisca_senegal/survey_scenarios.py | openfisca_senegal/survey_scenarios.py | # -*- coding: utf-8 -*-
from openfisca_senegal import CountryTaxBenefitSystem as SenegalTaxBenefitSystem
from openfisca_survey_manager.scenarios import AbstractSurveyScenario
class SenegalSurveyScenario(AbstractSurveyScenario):
    """Survey scenario wired to the Senegal tax-benefit system."""
    # Entity-id / role columns expected in the input data frame.
    id_variable_by_entity_key = dict(
        famille = 'id_famille',
        )
    role_variable_by_entity_key = dict(
        famille = 'role_famille',
        )
    def __init__(self, input_data_frame = None, tax_benefit_system = None,
                 reference_tax_benefit_system = None, year = None):
        super(SenegalSurveyScenario, self).__init__()
        assert input_data_frame is not None
        assert year is not None
        self.year = year
        if tax_benefit_system is None:
            tax_benefit_system = SenegalTaxBenefitSystem()
        self.set_tax_benefit_systems(
            tax_benefit_system = tax_benefit_system,
            reference_tax_benefit_system = reference_tax_benefit_system
            )
        # Only feed the simulation the data-frame columns that are actual
        # tax-benefit-system variables.
        self.used_as_input_variables = list(
            set(tax_benefit_system.column_by_name.keys()).intersection(
                set(input_data_frame.columns)
                ))
        self.init_from_data_frame(input_data_frame = input_data_frame)
        self.new_simulation()
        if reference_tax_benefit_system is not None:
            self.new_simulation(reference = True)
| # -*- coding: utf-8 -*-
from openfisca_senegal import CountryTaxBenefitSystem as SenegalTaxBenefitSystem
from openfisca_survey_manager.scenarios import AbstractSurveyScenario
class SenegalSurveyScenario(AbstractSurveyScenario):
    """Survey scenario wired to the Senegal tax-benefit system."""
    # Entity-id / role columns expected in the input data frame.
    id_variable_by_entity_key = dict(
        famille = 'id_famille',
        )
    role_variable_by_entity_key = dict(
        famille = 'role_famille',
        )
    def __init__(self, input_data_frame = None, tax_benefit_system = None,
                 baseline_tax_benefit_system = None, year = None):
        super(SenegalSurveyScenario, self).__init__()
        assert input_data_frame is not None
        assert year is not None
        self.year = year
        if tax_benefit_system is None:
            tax_benefit_system = SenegalTaxBenefitSystem()
        self.set_tax_benefit_systems(
            tax_benefit_system = tax_benefit_system,
            baseline_tax_benefit_system = baseline_tax_benefit_system
            )
        # Only feed the simulation the data-frame columns that are actual
        # tax-benefit-system variables.
        self.used_as_input_variables = list(
            set(tax_benefit_system.variables.keys()).intersection(
                set(input_data_frame.columns)
                ))
        self.init_from_data_frame(input_data_frame = input_data_frame)
        self.new_simulation()
        if baseline_tax_benefit_system is not None:
            self.new_simulation(use_baseline = True)
| Fix survey_scenario (use core v20 syntax) | Fix survey_scenario (use core v20 syntax)
| Python | agpl-3.0 | openfisca/senegal | ---
+++
@@ -14,7 +14,7 @@
)
def __init__(self, input_data_frame = None, tax_benefit_system = None,
- reference_tax_benefit_system = None, year = None):
+ baseline_tax_benefit_system = None, year = None):
super(SenegalSurveyScenario, self).__init__()
assert input_data_frame is not None
assert year is not None
@@ -23,13 +23,13 @@
tax_benefit_system = SenegalTaxBenefitSystem()
self.set_tax_benefit_systems(
tax_benefit_system = tax_benefit_system,
- reference_tax_benefit_system = reference_tax_benefit_system
+ baseline_tax_benefit_system = baseline_tax_benefit_system
)
self.used_as_input_variables = list(
- set(tax_benefit_system.column_by_name.keys()).intersection(
+ set(tax_benefit_system.variables.keys()).intersection(
set(input_data_frame.columns)
))
self.init_from_data_frame(input_data_frame = input_data_frame)
self.new_simulation()
- if reference_tax_benefit_system is not None:
- self.new_simulation(reference = True)
+ if baseline_tax_benefit_system is not None:
+ self.new_simulation(use_baseline = True) |
6059bff12c159fbe43e789c4b55fe5eb012517e9 | src/checker/plugin/filters/depth.py | src/checker/plugin/filters/depth.py | from filter import FilterException
from common import PluginType
from yapsy.IPlugin import IPlugin
import logging
class DepthFilter(IPlugin):
    """Filter plugin that rejects transactions crawled deeper than maxDepth."""
    category = PluginType.FILTER
    id = "depth"
    def __init__(self):
        self.__log = logging.getLogger(__name__)
        self.__conf = None
    def setConf(self, conf):
        self.__conf = conf
    def setJournal(self, journal):
        # This filter does not record anything in the journal.
        pass
    def filter(self, transaction):
        """Raise FilterException when the transaction exceeds the depth limit."""
        # A maxDepth of 0 means the crawl depth is unlimited.
        maxDepth = self.__conf.getProperty("maxDepth")
        if maxDepth == 0:
            return # unlimited
        if transaction.depth > maxDepth:
            self.__log.debug("Skipping " + transaction.uri +
                             " as it's depth " + str(transaction.depth) +
                             " and max depth condition is " + str(maxDepth))
            raise FilterException()
| from filter import FilterException
from common import PluginType
from yapsy.IPlugin import IPlugin
import logging
class DepthFilter(IPlugin):
    """Filter plugin that rejects transactions crawled deeper than maxDepth."""
    category = PluginType.FILTER
    id = "depth"
    def __init__(self):
        self.__log = logging.getLogger(__name__)
        self.__conf = None
    def setConf(self, conf):
        self.__conf = conf
    def setJournal(self, journal):
        # This filter does not record anything in the journal.
        pass
    def filter(self, transaction):
        """Raise FilterException when the transaction exceeds the depth limit."""
        # A maxDepth of 0 (the default when unset) means depth is unlimited.
        maxDepth = self.__conf.getProperty("maxDepth", 0)
        if maxDepth == 0:
            return # unlimited
        if transaction.depth > maxDepth:
            self.__log.debug("Skipping " + transaction.uri +
                             " as it's depth " + str(transaction.depth) +
                             " and max depth condition is " + str(maxDepth))
            raise FilterException()
| Add default value on property | Add default value on property
| Python | mit | eghuro/crawlcheck | ---
+++
@@ -20,7 +20,7 @@
pass
def filter(self, transaction):
- maxDepth = self.__conf.getProperty("maxDepth")
+ maxDepth = self.__conf.getProperty("maxDepth", 0)
if maxDepth == 0:
return # unlimited
if transaction.depth > maxDepth: |
d7b89cf5e8a0102034b0d9ed7f28fbe7fb0bc167 | cartoframes/viz/helpers/color_category_layer.py | cartoframes/viz/helpers/color_category_layer.py | from __future__ import absolute_import
from ..layer import Layer
def color_category_layer(source, value, top=11, palette='bold', title='', othersLabel='Others'):
return Layer(
source,
style={
'point': {
'color': 'ramp(top(${0}, {1}), {2})'.format(value, top, palette)
},
'line': {
'color': 'ramp(top(${0}, {1}), {2})'.format(value, top, palette)
},
'polygon': {
'color': 'opacity(ramp(top(${0}, {1}), {2}), 0.1)'.format(value, top, palette)
}
},
popup={
'hover': {
'label': title or value,
'value': '$' + value
}
},
legend={
'type': 'basic',
'ramp': 'color',
'heading': title or value,
'description': '',
'othersLabel': othersLabel
}
)
| from __future__ import absolute_import
from ..layer import Layer
def color_category_layer(source, value, top=11, palette='bold', title=''):
return Layer(
source,
style={
'point': {
'color': 'ramp(top(${0}, {1}), {2})'.format(value, top, palette)
},
'line': {
'color': 'ramp(top(${0}, {1}), {2})'.format(value, top, palette)
},
'polygon': {
'color': 'opacity(ramp(top(${0}, {1}), {2}), 0.9)'.format(value, top, palette)
}
},
popup={
'hover': {
'label': title or value,
'value': '$' + value
}
},
legend={
'type': 'basic',
'ramp': 'color',
'heading': title or value,
'description': '',
'othersLabel': 'Others'
}
)
| Use fixed value in the helper | Use fixed value in the helper
| Python | bsd-3-clause | CartoDB/cartoframes,CartoDB/cartoframes | ---
+++
@@ -3,7 +3,7 @@
from ..layer import Layer
-def color_category_layer(source, value, top=11, palette='bold', title='', othersLabel='Others'):
+def color_category_layer(source, value, top=11, palette='bold', title=''):
return Layer(
source,
style={
@@ -14,7 +14,7 @@
'color': 'ramp(top(${0}, {1}), {2})'.format(value, top, palette)
},
'polygon': {
- 'color': 'opacity(ramp(top(${0}, {1}), {2}), 0.1)'.format(value, top, palette)
+ 'color': 'opacity(ramp(top(${0}, {1}), {2}), 0.9)'.format(value, top, palette)
}
},
popup={
@@ -28,6 +28,6 @@
'ramp': 'color',
'heading': title or value,
'description': '',
- 'othersLabel': othersLabel
+ 'othersLabel': 'Others'
}
) |
c39e824119498fb6d669367fb53249b609edbaf2 | main.py | main.py | import requests
import os
import shutil
client_id = '' # Imgur application Client ID, fill this in.
directory_name = 'Imgur Albums\\' # Directory for the images to be stored
def scrape(album_id):
''' (str) -> list of str
Given an Imgur album ID, scrapes all the images within the album into a
folder. Returns a list containing all the links that were scraped.
'''
# Seperate directory for each album's images to be stored in
directory = directory_name+album_id+'\\'
if not os.path.exists(directory):
os.makedirs(directory) # Creates the full directory for the album
imageList = []
print('Loading Album: '+album_id)
link = 'https://api.imgur.com/3/album/' + album_id
header = {'Authorization': 'Client-Id '+client_id}
album = requests.get(link, headers=header).json()
if not album['success']:
return album['data']['error']
# Scrape image links from the album
for image in album['data']['images']:
imageList.append(image['link'])
# Downloads each image and writes to disk
for image in imageList:
download = requests.get(image, stream=True)
with open(directory+image[image.find('com/')+4:], 'wb') as outFile:
shutil.copyfileobj(download.raw, outFile)
return imageList
| import requests
import os
import shutil
client_id = '' # Imgur application Client ID, fill this in.
directory_name = 'Imgur Albums\\' # Directory for the images to be stored
def scrape(album_id):
''' (str) -> list of str
Given an Imgur album ID, scrapes all the images within the album into a
folder. Returns a list containing all the links that were scraped.
'''
# Seperate directory for each album's images to be stored in
directory = directory_name+album_id+'\\'
if not os.path.exists(directory):
os.makedirs(directory) # Creates the full directory for the album
imageList = []
print('Loading Album: '+album_id)
# Loading the album with Requests
link = 'https://api.imgur.com/3/album/' + album_id
header = {'Authorization': 'Client-Id '+client_id}
album = requests.get(link, headers=header).json()
if not album['success']:
raise Exception(album['data']['error'])
# Scrape image links from the album
for image in album['data']['images']:
imageList.append(image['link'])
# Downloads each image and writes to disk
for image in imageList:
download = requests.get(image, stream=True)
with open(directory+image[image.find('com/')+4:], 'wb') as outFile:
shutil.copyfileobj(download.raw, outFile)
return imageList
| Raise exception instead of returnning it | Raise exception instead of returnning it
| Python | mit | kevinleung987/Imgur-Album-Scraper | ---
+++
@@ -17,11 +17,12 @@
os.makedirs(directory) # Creates the full directory for the album
imageList = []
print('Loading Album: '+album_id)
+ # Loading the album with Requests
link = 'https://api.imgur.com/3/album/' + album_id
header = {'Authorization': 'Client-Id '+client_id}
album = requests.get(link, headers=header).json()
if not album['success']:
- return album['data']['error']
+ raise Exception(album['data']['error'])
# Scrape image links from the album
for image in album['data']['images']:
imageList.append(image['link']) |
8f84ce069159cb7d0dfb95a2a28f992a8a4f3556 | client.py | client.py | import threading
import select
class Client(object):
def __init__(self, sock, address, host):
self.socket = sock
self.server = host
self.address = address
self.connected = True
self.exit_request = False
self.thread = ClientThread(self)
self.thread.start()
self.socket.setblocking(0)
def disconnect(self):
self.exit_request = True
class ClientThread(threading.Thread):
def __init__(self, parent):
super(ClientThread, self).__init__()
self.parent = parent
def run(self):
while not self.parent.exit_request:
ready_to_read, ready_to_write, in_error = select.select(
[self.parent.socket], [], [], 2
)
if self.parent.socket not in ready_to_read:
continue
data = self.parent.socket.recv(1024)
if not data:
break
self.parent.socket.sendall(data)
self.parent.socket.sendall('Bye!')
self.parent.socket.close()
self.parent.connected = False
| import threading
import select
class Client(object):
def __init__(self, sock, address, host):
self.socket = sock
self.server = host
self.address = address
self.connected = True
self.exit_request = False
self.thread = ClientThread(self)
self.thread.start()
self.socket.setblocking(0)
def on_receive(self, data):
pass
def send(self, data):
self.socket.sendall(data)
def disconnect(self):
self.exit_request = True
class ClientThread(threading.Thread):
def __init__(self, parent):
super(ClientThread, self).__init__()
self.parent = parent
def run(self):
while not self.parent.exit_request:
ready_to_read, ready_to_write, in_error = select.select(
[self.parent.socket], [], [], 2
)
if self.parent.socket not in ready_to_read:
continue
data = self.parent.socket.recv(1024)
if not data:
break
self.parent.on_receive(data)
self.parent.socket.sendall('Bye!')
self.parent.socket.close()
self.parent.connected = False
| Add on_receive and send methods. | Add on_receive and send methods.
on_receive method is called when a new data is got.
send method is used to send data to the client.
| Python | mit | Mariusz-v7/WebSocketServer-Python- | ---
+++
@@ -14,6 +14,12 @@
self.thread.start()
self.socket.setblocking(0)
+
+ def on_receive(self, data):
+ pass
+
+ def send(self, data):
+ self.socket.sendall(data)
def disconnect(self):
self.exit_request = True
@@ -34,7 +40,7 @@
data = self.parent.socket.recv(1024)
if not data:
break
- self.parent.socket.sendall(data)
+ self.parent.on_receive(data)
self.parent.socket.sendall('Bye!')
self.parent.socket.close() |
2dde683b0a43a6339e446e37aab2726aaca65d83 | config.py | config.py | # Screen size
WIDTH = 320
HEIGHT = 240
SCREEN_SIZE = (WIDTH, HEIGHT)
# Update delay in ms
DELAY = 100
ALIVE_IMG_PATH="data/alive.png"
DEAD_IMG_PATH="data/dead.png"
CURSOR_DEAD_IMG_PATH="data/cursor.png"
CURSOR_ALIVE_IMG_PATH="data/cursor_alive.png"
ALIVE_IMG = None
DEAD_IMG = None
| # Screen size
WIDTH = 320
HEIGHT = 240
SCREEN_SIZE = (WIDTH, HEIGHT)
# Update delay in ms
# On ETN it's already pretty slow
DELAY = 0
ALIVE_IMG_PATH="data/alive.png"
DEAD_IMG_PATH="data/dead.png"
CURSOR_DEAD_IMG_PATH="data/cursor.png"
CURSOR_ALIVE_IMG_PATH="data/cursor_alive.png"
ALIVE_IMG = None
DEAD_IMG = None
| Set delay to 0, because ETN is slow. | Set delay to 0, because ETN is slow.
| Python | mit | dzeban/PyGameOfLife | ---
+++
@@ -4,7 +4,8 @@
SCREEN_SIZE = (WIDTH, HEIGHT)
# Update delay in ms
-DELAY = 100
+# On ETN it's already pretty slow
+DELAY = 0
ALIVE_IMG_PATH="data/alive.png"
DEAD_IMG_PATH="data/dead.png" |
dfca6054fd32c0904bc027dcddb347c378adf954 | dumper/file_uploader.py | dumper/file_uploader.py | import ftplib
import ntpath
import logging
import urllib.parse
import win32clipboard
class FileUploader:
def __init__(self, config):
self._host = config['Server']['host']
self._user = config['Server']['user']
self._passwd = config['Server']['password']
self._path = config['Server']['remote_path']
self._url = config['Server']['url']
# test ftp login
def upload(self, filepath):
try:
with ftplib.FTP(self._host, self._user, self._passwd) as ftp:
ftp.cwd(self._path)
name = ntpath.basename(filepath)
logging.info('Upload \'{}\''.format(name))
msg = ftp.storbinary('STOR ' + name, open(filepath, 'rb'))
logging.info(msg)
self._copy_to_clipboard(name)
except ftplib.all_errors as e:
logging.exception(e, exc_info=False)
def _copy_to_clipboard(self, name):
url = self._url + urllib.parse.quote(name)
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(url)
win32clipboard.CloseClipboard()
logging.info('Copied to clipboard \'{}\''.format(url))
| import ftplib
import ntpath
import logging
import urllib.parse
import win32clipboard
class FileUploader:
def __init__(self, config):
self._host = config['Server']['host']
self._user = config['Server']['user']
self._passwd = config['Server']['password']
self._path = config['Server']['remote_path']
self._url = config['Server']['url']
self._copy_url = config.getboolean('Settings', 'copy_url_to_clipboard')
# test ftp login
def upload(self, filepath):
try:
with ftplib.FTP(self._host, self._user, self._passwd) as ftp:
ftp.cwd(self._path)
name = ntpath.basename(filepath)
logging.info('Upload \'{}\''.format(name))
msg = ftp.storbinary('STOR ' + name, open(filepath, 'rb'))
logging.info(msg)
if self._copy_url:
self._copy_to_clipboard(name)
except ftplib.all_errors as e:
logging.exception(e, exc_info=False)
def _copy_to_clipboard(self, name):
url = urllib.parse.urljoin(self._url, urllib.parse.quote(name))
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(url)
win32clipboard.CloseClipboard()
logging.info('Copied to clipboard \'{}\''.format(url))
| Add copy url to clipboard setting | Add copy url to clipboard setting
| Python | mit | yschua/dumper | ---
+++
@@ -12,6 +12,7 @@
self._passwd = config['Server']['password']
self._path = config['Server']['remote_path']
self._url = config['Server']['url']
+ self._copy_url = config.getboolean('Settings', 'copy_url_to_clipboard')
# test ftp login
@@ -24,12 +25,13 @@
msg = ftp.storbinary('STOR ' + name, open(filepath, 'rb'))
logging.info(msg)
- self._copy_to_clipboard(name)
+ if self._copy_url:
+ self._copy_to_clipboard(name)
except ftplib.all_errors as e:
logging.exception(e, exc_info=False)
def _copy_to_clipboard(self, name):
- url = self._url + urllib.parse.quote(name)
+ url = urllib.parse.urljoin(self._url, urllib.parse.quote(name))
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(url) |
93b1f8e67b1154fd595a938ca41877eb76c7e995 | lcd.py | lcd.py | from telnetlib import Telnet
import time
tn = Telnet('192.168.1.15', 13666, None)
#tn.interact()
tn.write("hello\n")
tn.write("screen_add s1\n")
tn.write("screen_set s1 -priority 1\n")
tn.write("widget_add s1 w1 string\n")
tn.write("widget_add s1 w2 string\n")
tn.write("widget_set s1 w1 1 1 {It is a truth u}\n")
tn.write("widget_set s1 w2 1 2 {niversally ackno}\n")
print "sleeping"
time.sleep(5)
def lcd_string(x, telnet_obj, delay=5):
L = []
for i in range(len(x)):
if i % (15+16) == 0:
L.append(x[i:i+15+16])
for s in L:
s1 = s[0:15]
s2 = s[15:]
telnet_obj.write("widget_set s1 w1 1 1 {" + s1 + "}\n")
telnet_obj.write("widget_set s1 w2 1 2 {" + s2 + "}\n")
time.sleep(delay)
lcd_string('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz', tn)
| #!/usr/bin/env python
from telnetlib import Telnet
import time
import sys
tn = Telnet('192.168.1.15', 13666, None)
pipe_contents = sys.stdin.read()
pipe_contents = pipe_contents.replace('\n', ' ')
tn.write("hello\n")
tn.write("screen_add s1\n")
tn.write("screen_set s1 -priority 1\n")
tn.write("widget_add s1 w1 string\n")
tn.write("widget_add s1 w2 string\n")
def lcd_string(x, telnet_obj, delay=2):
L = []
for i in range(len(x)):
if i % (15+16) == 0:
L.append(x[i:i+15+16])
for s in L:
s1 = s[0:15]
s2 = s[15:]
telnet_obj.write("widget_set s1 w1 1 1 {" + s1 + "}\n")
telnet_obj.write("widget_set s1 w2 1 2 {" + s2 + "}\n")
time.sleep(delay)
lcd_string(pipe_contents, tn)
| Read standard input instead of hard-coded strings. | Read standard input instead of hard-coded strings.
| Python | mit | zimolzak/Raspberry-Pi-newbie,zimolzak/Raspberry-Pi-newbie,zimolzak/Raspberry-Pi-newbie,zimolzak/Raspberry-Pi-newbie,zimolzak/Raspberry-Pi-newbie | ---
+++
@@ -1,20 +1,20 @@
+#!/usr/bin/env python
+
from telnetlib import Telnet
import time
+import sys
+
tn = Telnet('192.168.1.15', 13666, None)
-
-#tn.interact()
+pipe_contents = sys.stdin.read()
+pipe_contents = pipe_contents.replace('\n', ' ')
tn.write("hello\n")
tn.write("screen_add s1\n")
tn.write("screen_set s1 -priority 1\n")
tn.write("widget_add s1 w1 string\n")
tn.write("widget_add s1 w2 string\n")
-tn.write("widget_set s1 w1 1 1 {It is a truth u}\n")
-tn.write("widget_set s1 w2 1 2 {niversally ackno}\n")
-print "sleeping"
-time.sleep(5)
-def lcd_string(x, telnet_obj, delay=5):
+def lcd_string(x, telnet_obj, delay=2):
L = []
for i in range(len(x)):
if i % (15+16) == 0:
@@ -26,4 +26,4 @@
telnet_obj.write("widget_set s1 w2 1 2 {" + s2 + "}\n")
time.sleep(delay)
-lcd_string('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz', tn)
+lcd_string(pipe_contents, tn) |
b4cefb5f1a5541b9b7312878790f6fe65412e010 | rst2pdf/utils.py | rst2pdf/utils.py | #$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
import sys
from reportlab.platypus import PageBreak, Spacer
from flowables import *
import shlex
from log import log
def parseRaw (data):
'''Parse and process a simple DSL to handle creation of flowables.
Supported (can add others on request):
* PageBreak
* Spacer width, height
'''
elements=[]
lines=data.splitlines()
for line in lines:
lexer=shlex.shlex(line)
lexer.whitespace+=','
tokens=list(lexer)
command=tokens[0]
if command == 'PageBreak':
if len(tokens)==1:
elements.append(MyPageBreak())
else:
elements.append(MyPageBreak(tokens[1]))
if command == 'Spacer':
elements.append(Spacer(int(tokens[1]),int(tokens[2])))
if command == 'Transition':
elements.append(Transition(*tokens[1:]))
return elements
# Looks like this is not used anywhere now
#def depth (node):
# if node.parent==None:
# return 0
# else:
# return 1+depth(node.parent)
| # -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
import sys
from reportlab.platypus import PageBreak, Spacer
from flowables import *
import shlex
from log import log
def parseRaw (data):
'''Parse and process a simple DSL to handle creation of flowables.
Supported (can add others on request):
* PageBreak
* Spacer width, height
'''
elements=[]
lines=data.splitlines()
for line in lines:
lexer=shlex.shlex(line)
lexer.whitespace+=','
tokens=list(lexer)
command=tokens[0]
if command == 'PageBreak':
if len(tokens)==1:
elements.append(MyPageBreak())
else:
elements.append(MyPageBreak(tokens[1]))
if command == 'Spacer':
elements.append(Spacer(int(tokens[1]),int(tokens[2])))
if command == 'Transition':
elements.append(Transition(*tokens[1:]))
return elements
# Looks like this is not used anywhere now
#def depth (node):
# if node.parent==None:
# return 0
# else:
# return 1+depth(node.parent)
| Fix encoding (thanks to Yasushi Masuda) | Fix encoding (thanks to Yasushi Masuda)
| Python | mit | rafaelmartins/rst2pdf,rafaelmartins/rst2pdf | ---
+++
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$ |
61c6c2687af9de1d5ccded1f1cb866a7638d8c31 | tests/test_director.py | tests/test_director.py | from pydirections.director import Director
from unittest import TestCase
class TestModeValidity(TestCase):
def test_invalid_mode(self):
"""
Tests the is_valid_mode function for an invalid input
"""
default_error_message = "The mode: {0} is invalid"
invalid_mode = "flying"
self.assertFalse(Director.is_valid_mode(invalid_mode), msg=default_error_message.format(invalid_mode))
| from pydirections.director import Director
from unittest import TestCase
import re
class TestModeValidity(TestCase):
def test_invalid_mode(self):
"""
Tests the is_valid_mode function for an invalid input
"""
default_error_message = "The mode: {0} is invalid"
invalid_mode = "flying"
self.assertFalse(Director.is_valid_mode(invalid_mode), msg=default_error_message.format(invalid_mode))
class TestDirectionFetching(TestCase):
"""
This class has test cases to cover the functionality of the fetch_directions function.
"""
def test_invalid_or_missing_params(self):
"""
Tests if the required key-word args are missing or are invalid
"""
with self.assertRaises(ValueError):
Director.fetch_directions()
with self.assertRaises(ValueError):
Director.fetch_directions(origin="123 Fake Street Springfield, MA", dest="End")
def test_invalid_mode(self):
"""
Tests if an exception was raised if an invalid mode was provided
"""
with self.assertRaises(ValueError):
Director.fetch_directions(origin="San Francisco, CA", destination="Mountain View, CA", mode="flying")
| Add tests for data pre-processing in fetch_directions | Add tests for data pre-processing in fetch_directions
| Python | apache-2.0 | apranav19/pydirections | ---
+++
@@ -1,5 +1,6 @@
from pydirections.director import Director
from unittest import TestCase
+import re
class TestModeValidity(TestCase):
def test_invalid_mode(self):
@@ -9,3 +10,24 @@
default_error_message = "The mode: {0} is invalid"
invalid_mode = "flying"
self.assertFalse(Director.is_valid_mode(invalid_mode), msg=default_error_message.format(invalid_mode))
+
+class TestDirectionFetching(TestCase):
+ """
+ This class has test cases to cover the functionality of the fetch_directions function.
+ """
+ def test_invalid_or_missing_params(self):
+ """
+ Tests if the required key-word args are missing or are invalid
+ """
+ with self.assertRaises(ValueError):
+ Director.fetch_directions()
+
+ with self.assertRaises(ValueError):
+ Director.fetch_directions(origin="123 Fake Street Springfield, MA", dest="End")
+
+ def test_invalid_mode(self):
+ """
+ Tests if an exception was raised if an invalid mode was provided
+ """
+ with self.assertRaises(ValueError):
+ Director.fetch_directions(origin="San Francisco, CA", destination="Mountain View, CA", mode="flying") |
a3330105ca36ec758723e2f456ed56e107afec9e | textblob/exceptions.py | textblob/exceptions.py | # -*- coding: utf-8 -*-
MISSING_CORPUS_MESSAGE = """
Looks like you are missing some required data for this feature.
To download the necessary data, simply run
curl https://raw.github.com/sloria/TextBlob/master/download_corpora.py | python
Or use the NLTK downloader to download the missing data: http://nltk.org/data.html
If this doesn't fix the problem, file an issue at https://github.com/sloria/TextBlob/issues.
"""
class TextBlobException(Exception):
'''A TextBlob-related exception.'''
class MissingCorpusException(TextBlobException):
'''Exception thrown when a user tries to use a feature that requires a
dataset or model that the user does not have on their system.
'''
def __init__(self, message=MISSING_CORPUS_MESSAGE, *args, **kwargs):
super(MissingCorpusException, self).__init__(message, *args, **kwargs)
class DeprecationError(TextBlobException):
'''Raised when user uses a deprecated feature.'''
pass
| # -*- coding: utf-8 -*-
MISSING_CORPUS_MESSAGE = """
Looks like you are missing some required data for this feature.
To download the necessary data, simply run
python -m textblob.download_corpora
Or use the NLTK downloader to download the missing data: http://nltk.org/data.html
If this doesn't fix the problem, file an issue at https://github.com/sloria/TextBlob/issues.
"""
class TextBlobException(Exception):
'''A TextBlob-related exception.'''
class MissingCorpusException(TextBlobException):
'''Exception thrown when a user tries to use a feature that requires a
dataset or model that the user does not have on their system.
'''
def __init__(self, message=MISSING_CORPUS_MESSAGE, *args, **kwargs):
super(MissingCorpusException, self).__init__(message, *args, **kwargs)
class DeprecationError(TextBlobException):
'''Raised when user uses a deprecated feature.'''
pass
| Fix download command in error message | Fix download command in error message
| Python | mit | adelq/TextBlob,IrisSteenhout/TextBlob,freakynit/TextBlob,beni55/TextBlob,dipeshtech/TextBlob,sloria/TextBlob,nvoron23/TextBlob,jonmcoe/TextBlob,sargam111/python,jcalbert/TextBlob,Windy-Ground/TextBlob,laugustyniak/TextBlob | ---
+++
@@ -5,7 +5,7 @@
To download the necessary data, simply run
- curl https://raw.github.com/sloria/TextBlob/master/download_corpora.py | python
+ python -m textblob.download_corpora
Or use the NLTK downloader to download the missing data: http://nltk.org/data.html
If this doesn't fix the problem, file an issue at https://github.com/sloria/TextBlob/issues. |
f43248112a867bc819ced274901db25cebe397b7 | roledb/serializers.py | roledb/serializers.py |
from rest_framework import serializers
from roledb.models import User
class UserSerializer(serializers.ModelSerializer):
roles = serializers.SerializerMethodField('role_data')
attributes = serializers.SerializerMethodField('attribute_data')
class Meta:
model = User
fields = ('username', 'roles', 'attributes')
def role_data(self, obj):
data = []
for a in obj.attendances.all():
d = {}
d['school'] = a.school.school_id
d['group'] = a.group
d['role'] = a.role.name
data.append(d)
return data
def attribute_data(self, obj):
data = []
for a in obj.attributes.all():
d = {}
d['name'] = a.attribute.name
d['value'] = a.value
data.append(d)
return data
|
from rest_framework import serializers
from roledb.models import User
class UserSerializer(serializers.ModelSerializer):
roles = serializers.SerializerMethodField('role_data')
attributes = serializers.SerializerMethodField('attribute_data')
class Meta:
model = User
fields = ('username','first_name','last_name','roles','attributes')
def role_data(self, obj):
data = []
for a in obj.attendances.all():
d = {}
d['school'] = a.school.school_id
d['group'] = a.group
d['role'] = a.role.name
data.append(d)
return data
def attribute_data(self, obj):
data = []
for a in obj.attributes.all():
d = {}
d['name'] = a.attribute.name
d['value'] = a.value
data.append(d)
return data
| Send first_name and last_name in JSON. | Send first_name and last_name in JSON.
| Python | mit | educloudalliance/eca-auth-data,educloudalliance/eca-auth-data | ---
+++
@@ -9,7 +9,7 @@
class Meta:
model = User
- fields = ('username', 'roles', 'attributes')
+ fields = ('username','first_name','last_name','roles','attributes')
def role_data(self, obj):
data = [] |
be1e23f068fbc34587caa0a796e259e42ed6f7c6 | utils.py | utils.py | import re
import textwrap
import html2text
text_maker = html2text.HTML2Text()
text_maker.body_width = 0
def strip_html_tags(text):
return re.sub('<[^<]+?>', '', text)
def html_to_md(string, strip_html=True, markdown=False):
if strip_html:
string = strip_html_tags(string)
if markdown:
string = text_maker.handle(string)
return string
def get_formatted_book_data(book_data):
template = textwrap.dedent("""\
*Title:* {0} by {1}
*Rating:* {2} by {3} users
*Description:* {4}
*Link*: [click me]({5})
Tip: {6}""")
title = book_data['title']
authors = book_data['authors']
average_rating = book_data['average_rating']
ratings_count = book_data['ratings_count']
description = html_to_md(book_data.get('description', ''))
url = book_data['url']
tip = 'Use author name also for better search results'
template = template.format(title, authors, average_rating, ratings_count,
description, url, tip)
return template
| import re
import textwrap
import html2text
text_maker = html2text.HTML2Text()
text_maker.body_width = 0
def strip_html_tags(text):
return re.sub('<[^<]+?>', '', text)
def html_to_md(string, strip_html=True, markdown=False):
if not string:
return 'No Description Found'
if strip_html:
string = strip_html_tags(string)
if markdown:
string = text_maker.handle(string)
return string
def get_formatted_book_data(book_data):
template = textwrap.dedent("""\
*Title:* {0} by {1}
*Rating:* {2} by {3} users
*Description:* {4}
*Link*: [click me]({5})
Tip: {6}""")
title = book_data['title']
authors = book_data['authors']
average_rating = book_data['average_rating']
ratings_count = book_data['ratings_count']
description = html_to_md(book_data.get('description', ''))
url = book_data['url']
tip = 'Use author name also for better search results'
template = template.format(title, authors, average_rating, ratings_count,
description, url, tip)
return template
| Handle Nonetype values in `html_to_md` | Handle Nonetype values in `html_to_md`
| Python | mit | avinassh/Laozi,avinassh/Laozi | ---
+++
@@ -12,6 +12,8 @@
def html_to_md(string, strip_html=True, markdown=False):
+ if not string:
+ return 'No Description Found'
if strip_html:
string = strip_html_tags(string)
if markdown: |
529e2b54ed90ab215facb8d8c35d708253b0a6d4 | src/livestreamer/plugins/dmcloud.py | src/livestreamer/plugins/dmcloud.py | from livestreamer.exceptions import NoStreamsError
from livestreamer.plugin import Plugin
from livestreamer.stream import HLSStream
from livestreamer.utils import urlget, parse_json
import re
class DMCloud(Plugin):
@classmethod
def can_handle_url(self, url):
return "api.dmcloud.net" in url
def _get_streams(self):
self.logger.debug("Fetching stream info")
res = urlget(self.url)
match = re.search("var info = (.*);", res.text)
if not match:
raise NoStreamsError(self.url)
json = parse_json(match.group(1))
if not "ios_url" in json:
raise NoStreamsError(self.url)
streams = HLSStream.parse_variant_playlist(self.session, json["ios_url"])
return streams
__plugin__ = DMCloud
| from livestreamer.exceptions import NoStreamsError
from livestreamer.plugin import Plugin
from livestreamer.stream import RTMPStream, HLSStream
from livestreamer.utils import parse_json, rtmpparse, swfdecompress, urlget
import re
class DMCloud(Plugin):
@classmethod
def can_handle_url(self, url):
return "api.dmcloud.net" in url
def _get_rtmp_streams(self, swfurl):
if not RTMPStream.is_usable(self.session):
raise NoStreamsError(self.url)
self.logger.debug("Fetching RTMP stream info")
res = urlget(swfurl)
swf = swfdecompress(res.content)
match = re.search("customURL[^h]+(https://.*?)\\\\", swf)
if not match:
raise NoStreamsError(self.url)
res = urlget(match.group(1))
rtmp, playpath = rtmpparse(res.text)
params = {
"rtmp": rtmp,
"pageUrl": self.url,
"playpath": playpath,
"live": True
}
match = re.search("file[^h]+(https?://.*?.swf)", swf)
if match:
params["swfUrl"] = match.group(1)
return RTMPStream(self.session, params)
def _get_streams(self):
self.logger.debug("Fetching stream info")
res = urlget(self.url)
match = re.search("var info = (.*);", res.text)
if not match:
raise NoStreamsError(self.url)
json = parse_json(match.group(1))
if not isinstance(json, dict):
return
ios_url = json.get("ios_url")
swf_url = json.get("swf_url")
streams = {}
if ios_url:
hls = HLSStream.parse_variant_playlist(self.session, ios_url)
streams.update(hls)
if swf_url:
try:
streams["live"] = self._get_rtmp_streams(swf_url)
except NoStreamsError:
pass
return streams
__plugin__ = DMCloud
| Add Support For RTMP Streams | DMCloud: Add Support For RTMP Streams
| Python | bsd-2-clause | flijloku/livestreamer,gtmanfred/livestreamer,blxd/livestreamer,beardypig/streamlink,back-to/streamlink,lyhiving/livestreamer,Dobatymo/livestreamer,Saturn/livestreamer,bastimeyer/streamlink,derrod/livestreamer,hmit/livestreamer,chhe/livestreamer,wolftankk/livestreamer,sbstp/streamlink,wlerin/streamlink,bastimeyer/streamlink,gtmanfred/livestreamer,Masaz-/livestreamer,chrippa/livestreamer,Masaz-/livestreamer,streamlink/streamlink,Saturn/livestreamer,charmander/livestreamer,chhe/streamlink,Feverqwe/livestreamer,melmorabity/streamlink,programming086/livestreamer,wlerin/streamlink,streamlink/streamlink,okaywit/livestreamer,okaywit/livestreamer,intact/livestreamer,hmit/livestreamer,gravyboat/streamlink,mmetak/streamlink,wolftankk/livestreamer,programming086/livestreamer,jtsymon/livestreamer,fishscene/streamlink,beardypig/streamlink,Feverqwe/livestreamer,chrippa/livestreamer,caorong/livestreamer,ethanhlc/streamlink,breunigs/livestreamer,breunigs/livestreamer,melmorabity/streamlink,jtsymon/livestreamer,caorong/livestreamer,flijloku/livestreamer,fishscene/streamlink,lyhiving/livestreamer,chhe/streamlink,derrod/livestreamer,chhe/livestreamer,Klaudit/livestreamer,sbstp/streamlink,blxd/livestreamer,mmetak/streamlink,javiercantero/streamlink,charmander/livestreamer,gravyboat/streamlink,javiercantero/streamlink,Dobatymo/livestreamer,ethanhlc/streamlink,back-to/streamlink,Klaudit/livestreamer,intact/livestreamer | ---
+++
@@ -1,7 +1,7 @@
from livestreamer.exceptions import NoStreamsError
from livestreamer.plugin import Plugin
-from livestreamer.stream import HLSStream
-from livestreamer.utils import urlget, parse_json
+from livestreamer.stream import RTMPStream, HLSStream
+from livestreamer.utils import parse_json, rtmpparse, swfdecompress, urlget
import re
@@ -10,6 +10,35 @@
@classmethod
def can_handle_url(self, url):
return "api.dmcloud.net" in url
+
+ def _get_rtmp_streams(self, swfurl):
+ if not RTMPStream.is_usable(self.session):
+ raise NoStreamsError(self.url)
+
+ self.logger.debug("Fetching RTMP stream info")
+
+ res = urlget(swfurl)
+ swf = swfdecompress(res.content)
+ match = re.search("customURL[^h]+(https://.*?)\\\\", swf)
+
+ if not match:
+ raise NoStreamsError(self.url)
+
+ res = urlget(match.group(1))
+ rtmp, playpath = rtmpparse(res.text)
+
+ params = {
+ "rtmp": rtmp,
+ "pageUrl": self.url,
+ "playpath": playpath,
+ "live": True
+ }
+
+ match = re.search("file[^h]+(https?://.*?.swf)", swf)
+ if match:
+ params["swfUrl"] = match.group(1)
+
+ return RTMPStream(self.session, params)
def _get_streams(self):
self.logger.debug("Fetching stream info")
@@ -20,10 +49,22 @@
raise NoStreamsError(self.url)
json = parse_json(match.group(1))
- if not "ios_url" in json:
- raise NoStreamsError(self.url)
+ if not isinstance(json, dict):
+ return
- streams = HLSStream.parse_variant_playlist(self.session, json["ios_url"])
+ ios_url = json.get("ios_url")
+ swf_url = json.get("swf_url")
+ streams = {}
+
+ if ios_url:
+ hls = HLSStream.parse_variant_playlist(self.session, ios_url)
+ streams.update(hls)
+
+ if swf_url:
+ try:
+ streams["live"] = self._get_rtmp_streams(swf_url)
+ except NoStreamsError:
+ pass
return streams
|
9299b8216c809e795af5f22457fe7c9b615195bb | search/searchers/__init__.py | search/searchers/__init__.py | from search.searchers.instagram import MediaSearchInstagram
from search.searchers.vkontakte import MediaSearchVK
from search.searchers.youtube import MediaSearchYouTube
from search.searchers.podacisearcher import DocumentSearchPodaci
from search.searchers.datatracker import DocumentSearchDataTracker
from search.searchers.opencorporates import EntitySearchOpenCorporates
SEARCHERS = [MediaSearchInstagram, MediaSearchVK, MediaSearchYouTube,
DocumentSearchPodaci, EntitySearchOpenCorporates,
DocumentSearchDataTracker]
| from search.searchers.instagram import MediaSearchInstagram
from search.searchers.vkontakte import MediaSearchVK
# from search.searchers.youtube import MediaSearchYouTube
from search.searchers.podacisearcher import DocumentSearchPodaci
from search.searchers.datatracker import DocumentSearchDataTracker
from search.searchers.opencorporates import EntitySearchOpenCorporates
SEARCHERS = [MediaSearchInstagram, MediaSearchVK, # MediaSearchYouTube,
DocumentSearchPodaci, EntitySearchOpenCorporates,
DocumentSearchDataTracker]
| Disable youtube searcher as key has been revoked. | Disable youtube searcher as key has been revoked. | Python | mit | occrp/id-backend | ---
+++
@@ -1,10 +1,10 @@
from search.searchers.instagram import MediaSearchInstagram
from search.searchers.vkontakte import MediaSearchVK
-from search.searchers.youtube import MediaSearchYouTube
+# from search.searchers.youtube import MediaSearchYouTube
from search.searchers.podacisearcher import DocumentSearchPodaci
from search.searchers.datatracker import DocumentSearchDataTracker
from search.searchers.opencorporates import EntitySearchOpenCorporates
-SEARCHERS = [MediaSearchInstagram, MediaSearchVK, MediaSearchYouTube,
+SEARCHERS = [MediaSearchInstagram, MediaSearchVK, # MediaSearchYouTube,
DocumentSearchPodaci, EntitySearchOpenCorporates,
DocumentSearchDataTracker] |
f56ee0bd67652d22de75989cfd7a752641aee91e | ex11+12.py | ex11+12.py | # Upper is ex11, lower part is ex 12
# Both print out are same, but ex11 needs 8 lines,
# ex 12 just need it for 4 lines.
print "How old are you?", # Becasue can't use print and value in same line
age = raw_input() # That's why need two lines to do so.
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
print "So, you're %r, old, %r tall and %r heavy." % (age, height, weight)
#---------------------- this is ex11.py
# let's compare!!
age = raw_input("How old are you? ") # when typed raw_input() the (), which are
height = raw_input("How tall are you? ") #similiar with " %s %s" % (x, y)
weight = raw_input("How much do you weight? ") # that's why the () can put the prompt.
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
# as teacher said print and raw_input can't put @ the same line.
# if want to use print and raw_input need to use 2 line!!
# otherwise need to make it like the ex12.py age = raw_input("How old are you? ")
# -------
# The difference btw ex11 and ex12 is the use of prompt string as an
# argument for `raw_input` function
prompt_string = "How old are you? "
print prompt_string
age = raw_input()
# vs
age = raw_input(prompt_string)
| # Upper is ex11, lower part is ex 12
# Both print out are same, but ex11 needs 8 lines,
# ex 12 just need it for 4 lines.
print "How old are you?", #Becasue can't use print and value in same line
age = raw_input() # That's why need two lines to do so.
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
print "So, you're %r, old, %r tall and %r heavy." % (
age, height, weight)
#---------------------- this is ex11.py
# let's compare!!
age = raw_input % ("How old are you? ") # when typed raw_input() the (), which are
height = raw_input % ("How tall are you? ") #similiar with " %s %s" % (x, y)
weight = raw_input("How much do you weight? ") # that's why the () can put the prompt.
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
# as teacher said print and raw_input can't put @ the same line.
# if want to use print and raw_input need to use 2 line!!
# otherwise need to make it like the ex12.py age = raw_input("How old are you? ")
| Revert "Review ex11 and ex12" | Revert "Review ex11 and ex12"
This reverts commit e5fe21acd40e9ebeae548e906747702783058d06.
| Python | mpl-2.0 | joievoyage/LPTHW_EXsss,vanzaj/LPTHW_EXsss | ---
+++
@@ -3,20 +3,21 @@
# ex 12 just need it for 4 lines.
-print "How old are you?", # Becasue can't use print and value in same line
-age = raw_input() # That's why need two lines to do so.
+print "How old are you?", #Becasue can't use print and value in same line
+age = raw_input() # That's why need two lines to do so.
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
-print "So, you're %r, old, %r tall and %r heavy." % (age, height, weight)
+print "So, you're %r, old, %r tall and %r heavy." % (
+ age, height, weight)
#---------------------- this is ex11.py
# let's compare!!
-age = raw_input("How old are you? ") # when typed raw_input() the (), which are
-height = raw_input("How tall are you? ") #similiar with " %s %s" % (x, y)
+age = raw_input % ("How old are you? ") # when typed raw_input() the (), which are
+height = raw_input % ("How tall are you? ") #similiar with " %s %s" % (x, y)
weight = raw_input("How much do you weight? ") # that's why the () can put the prompt.
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
@@ -25,15 +26,3 @@
# if want to use print and raw_input need to use 2 line!!
# otherwise need to make it like the ex12.py age = raw_input("How old are you? ")
-# -------
-# The difference btw ex11 and ex12 is the use of prompt string as an
-# argument for `raw_input` function
-
-prompt_string = "How old are you? "
-
-print prompt_string
-age = raw_input()
-
-# vs
-
-age = raw_input(prompt_string) |
94ec32865674f9f9b0dcecafec73d1e2930ccfdb | runtime/__init__.py | runtime/__init__.py | import builtins
import operator
import functools
from ..compile import varary
builtins.__dict__.update({
# Runtime counterparts of some stuff in `Compiler.builtins`.
'$': lambda f, *xs: f(*xs)
, ':': lambda f, *xs: f(*xs)
, ',': lambda a, *xs: (a,) + xs
, '<': operator.lt
, '<=': operator.le
, '==': operator.eq
, '!=': operator.ne
, '>': operator.gt
, '>=': operator.ge
, 'is': operator.is_
, 'in': lambda a, b: a in b
, 'not': operator.not_
, '~': operator.invert
, '+': varary(operator.pos, operator.add)
, '-': varary(operator.neg, operator.sub)
, '*': operator.mul
, '**': operator.pow
, '/': operator.truediv
, '//': operator.floordiv
, '%': operator.mod
, '!!': operator.getitem
, '&': operator.and_
, '^': operator.xor
, '|': operator.or_
, '<<': operator.lshift
, '>>': operator.rshift
# Useful stuff.
, 'foldl': functools.reduce
, '~:': functools.partial
})
| import builtins
import operator
import functools
from ..compile import varary
builtins.__dict__.update({
# Runtime counterparts of some stuff in `Compiler.builtins`.
'$': lambda f, *xs: f(*xs)
, ':': lambda f, *xs: f(*xs)
, ',': lambda a, *xs: (a,) + xs
, '<': operator.lt
, '<=': operator.le
, '==': operator.eq
, '!=': operator.ne
, '>': operator.gt
, '>=': operator.ge
, 'is': operator.is_
, 'in': lambda a, b: a in b
, 'not': operator.not_
, '~': operator.invert
, '+': varary(operator.pos, operator.add)
, '-': varary(operator.neg, operator.sub)
, '*': operator.mul
, '**': operator.pow
, '/': operator.truediv
, '//': operator.floordiv
, '%': operator.mod
, '!!': operator.getitem
, '&': operator.and_
, '^': operator.xor
, '|': operator.or_
, '<<': operator.lshift
, '>>': operator.rshift
# Useful stuff.
, 'foldl': functools.reduce
, '~:': functools.partial
# Not so useful stuff.
, 'if': lambda cond, then, else_=None: then if cond else else_
})
| Add a runtime version of if. | Add a runtime version of if.
elif/else can't have runtime versions 'cause they are purely syntactic.
| Python | mit | pyos/dg | ---
+++
@@ -39,4 +39,7 @@
# Useful stuff.
, 'foldl': functools.reduce
, '~:': functools.partial
+
+ # Not so useful stuff.
+ , 'if': lambda cond, then, else_=None: then if cond else else_
}) |
3bade9d6258fb8df849b32f68de6343cfdd83720 | saltapi/__init__.py | saltapi/__init__.py | '''
Make api awesomeness
'''
# Import python libs
#
# Import Salt libs
import salt.utils
import salt.client
import salt.runner
class API(object):
'''
'''
def __init__(self, opts):
self.opts = opts
self.local = salt.client.LocalClient(opts['conf_file'])
def run(self, low):
'''
'''
if not 'client' in low:
raise SaltException('No client specified')
l_fun = getattr(self, low['client'])
fcall = salt.utils.format_call(l_fun, low)
if 'kwargs' in fcall:
ret = l_fun(*fcall['args'], **fcall['kwargs'])
else:
ret = l_fun(*f_call['args'])
return ret
def cmd(
tgt,
fun,
arg=(),
expr_form='glob',
ret='',
timeout=None,
**kwargs):
'''
Wrap running a job
'''
return self.local.run_job(
tgt,
fun,
arg,
expr_form,
ret,
timeout,
**kwargs).get('jid')
def runner(fun, **kwargs):
'''
'''
runner = salt.runner.RunnerClient(opts)
return salt.runner.low(fun, kwargs)
| Add API wrapper to saltapi | Add API wrapper to saltapi
| Python | apache-2.0 | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | ---
+++
@@ -0,0 +1,57 @@
+'''
+Make api awesomeness
+'''
+
+# Import python libs
+#
+# Import Salt libs
+import salt.utils
+import salt.client
+import salt.runner
+
+class API(object):
+ '''
+ '''
+ def __init__(self, opts):
+ self.opts = opts
+ self.local = salt.client.LocalClient(opts['conf_file'])
+
+ def run(self, low):
+ '''
+ '''
+ if not 'client' in low:
+ raise SaltException('No client specified')
+ l_fun = getattr(self, low['client'])
+ fcall = salt.utils.format_call(l_fun, low)
+ if 'kwargs' in fcall:
+ ret = l_fun(*fcall['args'], **fcall['kwargs'])
+ else:
+ ret = l_fun(*f_call['args'])
+ return ret
+
+ def cmd(
+ tgt,
+ fun,
+ arg=(),
+ expr_form='glob',
+ ret='',
+ timeout=None,
+ **kwargs):
+ '''
+ Wrap running a job
+ '''
+ return self.local.run_job(
+ tgt,
+ fun,
+ arg,
+ expr_form,
+ ret,
+ timeout,
+ **kwargs).get('jid')
+
+ def runner(fun, **kwargs):
+ '''
+ '''
+ runner = salt.runner.RunnerClient(opts)
+ return salt.runner.low(fun, kwargs)
+ | |
ceef52c348bb3f4477c3d031d024c89eb7379039 | ooni/resources/cli.py | ooni/resources/cli.py | import sys
from twisted.python import usage
from ooni.resources import __version__
from ooni.resources import update
class Options(usage.Options):
synopsis = """%s"""
optParameters = []
def opt_version(self):
print("ooniresources version: %s" % __version__)
sys.exit(0)
def run():
options = Options()
try:
options.parseOptions()
except usage.UsageError as error_message:
print "%s: %s" % (sys.argv[0], error_message)
print "%s: Try --help for usage details." % (sys.argv[0])
sys.exit(1)
return update.download_inputs()
| import sys
from twisted.python import usage
from ooni.resources import __version__
from ooni.resources import update
class Options(usage.Options):
synopsis = """%s"""
optFlags = [
["update-inputs", None, "Update the resources needed for inputs"]
]
optParameters = []
def opt_version(self):
print("ooniresources version: %s" % __version__)
sys.exit(0)
def run():
options = Options()
try:
options.parseOptions()
except usage.UsageError as error_message:
print "%s: %s" % (sys.argv[0], error_message)
print "%s: Try --help for usage details." % (sys.argv[0])
sys.exit(1)
if options['update-inputs']:
return update.download_inputs()
print "%s: no command specified" % sys.argv[0]
print "%s: Try --help for usage details." % (sys.argv[0])
sys.exit(1)
| Make the update-inputs command an option | Make the update-inputs command an option
| Python | bsd-2-clause | Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe | ---
+++
@@ -9,6 +9,9 @@
class Options(usage.Options):
synopsis = """%s"""
+ optFlags = [
+ ["update-inputs", None, "Update the resources needed for inputs"]
+ ]
optParameters = []
def opt_version(self):
@@ -25,4 +28,9 @@
print "%s: Try --help for usage details." % (sys.argv[0])
sys.exit(1)
- return update.download_inputs()
+ if options['update-inputs']:
+ return update.download_inputs()
+
+ print "%s: no command specified" % sys.argv[0]
+ print "%s: Try --help for usage details." % (sys.argv[0])
+ sys.exit(1) |
a9b225d033d0462f47e4adecef2ef90fc0bf2318 | docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from gravity import __version__ # noqa: E402
project = 'Gravity'
copyright = '2022, The Galaxy Project'
author = 'The Galaxy Project'
release = '__version__'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = []
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'alabaster'
html_static_path = ['_static']
| # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from gravity import __version__ # noqa: E402
project = 'Gravity'
copyright = '2022, The Galaxy Project'
author = 'The Galaxy Project'
release = '__version__'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = []
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'alabaster'
html_static_path = ['_static']
| Correct path to parent dir | Correct path to parent dir
| Python | mit | galaxyproject/gravity | ---
+++
@@ -8,7 +8,7 @@
import os
import sys
-sys.path.insert(0, os.path.dirname(__file__))
+sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from gravity import __version__ # noqa: E402
|
8dd41fab9a43ef43d5f2dc27e11bdbda3c23bc56 | soapbox/tests/urls.py | soapbox/tests/urls.py | from django.conf.urls import patterns, url
from django.views.generic import TemplateView
urlpatterns = patterns(
'',
url(r'^$',
TemplateView.as_view(
template_name='soapboxtest/test_success.html')),
url(r'^foo/$',
TemplateView.as_view(
template_name='soapboxtest/test_success.html')),
url(r'^foo/bar/$',
TemplateView.as_view(
template_name='soapboxtest/test_success.html')),
url(r'^foo/bar/baz/$',
TemplateView.as_view(
template_name='soapboxtest/test_context_processor.html')),
url(r'^fail/$',
TemplateView.as_view(
template_name='soapboxtest/test_fail_syntax.html')),
url(r'^bad-url-var/$',
TemplateView.as_view(
template_name='soapboxtest/test_bad_variable.html')),
)
| from django.conf.urls import url
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$',
TemplateView.as_view(
template_name='soapboxtest/test_success.html')),
url(r'^foo/$',
TemplateView.as_view(
template_name='soapboxtest/test_success.html')),
url(r'^foo/bar/$',
TemplateView.as_view(
template_name='soapboxtest/test_success.html')),
url(r'^foo/bar/baz/$',
TemplateView.as_view(
template_name='soapboxtest/test_context_processor.html')),
url(r'^fail/$',
TemplateView.as_view(
template_name='soapboxtest/test_fail_syntax.html')),
url(r'^bad-url-var/$',
TemplateView.as_view(
template_name='soapboxtest/test_bad_variable.html')),
]
| Stop using patterns() in the test URLs. | Stop using patterns() in the test URLs.
| Python | bsd-3-clause | ubernostrum/django-soapbox,ubernostrum/django-soapbox | ---
+++
@@ -1,9 +1,8 @@
-from django.conf.urls import patterns, url
+from django.conf.urls import url
from django.views.generic import TemplateView
-urlpatterns = patterns(
- '',
+urlpatterns = [
url(r'^$',
TemplateView.as_view(
template_name='soapboxtest/test_success.html')),
@@ -22,4 +21,4 @@
url(r'^bad-url-var/$',
TemplateView.as_view(
template_name='soapboxtest/test_bad_variable.html')),
-)
+] |
4418a08553572ca18187472cc32e5044229333f2 | django/applications/catmaid/management/commands/catmaid_set_user_profiles_to_default.py | django/applications/catmaid/management/commands/catmaid_set_user_profiles_to_default.py | from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Set the user profile settings of every user to the defaults"
def handle_noargs(self, **options):
for u in User.objects.all():
up = u.userprofile
# Expect user profiles to be there and add all default settings
up.inverse_mouse_wheel = settings.PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL
up.show_text_label_tool = settings.PROFILE_SHOW_TEXT_LABEL_TOOL
up.show_tagging_tool = settings.PROFILE_SHOW_TAGGING_TOOL
up.show_cropping_tool = settings.PROFILE_SHOW_CROPPING_TOOL
up.show_segmentation_tool = settings.PROFILE_SHOW_SEGMENTATION_TOOL
up.show_tracing_tool = settings.PROFILE_SHOW_TRACING_TOOL
# Save the changes
up.save()
| from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import NoArgsCommand, CommandError
from optparse import make_option
class Command(NoArgsCommand):
help = "Set the user profile settings of every user to the defaults"
option_list = NoArgsCommand.option_list + (
make_option('--update-anon-user', dest='update-anon-user',
default=False, action='store_true',
help='Update also the profile of the anonymous user'),
)
def handle_noargs(self, **options):
update_anon_user = 'update-anon-user' in options
for u in User.objects.all():
# Ignore the anonymous user by default
if u.id == settings.ANONYMOUS_USER_ID and not update_anon_user:
continue
up = u.userprofile
# Expect user profiles to be there and add all default settings
up.inverse_mouse_wheel = settings.PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL
up.show_text_label_tool = settings.PROFILE_SHOW_TEXT_LABEL_TOOL
up.show_tagging_tool = settings.PROFILE_SHOW_TAGGING_TOOL
up.show_cropping_tool = settings.PROFILE_SHOW_CROPPING_TOOL
up.show_segmentation_tool = settings.PROFILE_SHOW_SEGMENTATION_TOOL
up.show_tracing_tool = settings.PROFILE_SHOW_TRACING_TOOL
# Save the changes
up.save()
| Add explicit anonymous user update parameter to user profile command | Add explicit anonymous user update parameter to user profile command
The set_user_profiles_to_default management command should only update
the anonymous user if explicitely stated. This commit adds the
'--update-anon-user' switch to do let the user state that also the
anonymous user's profile should be updated with the current default
values.
| Python | agpl-3.0 | fzadow/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID | ---
+++
@@ -2,11 +2,23 @@
from django.contrib.auth.models import User
from django.core.management.base import NoArgsCommand, CommandError
+from optparse import make_option
+
class Command(NoArgsCommand):
help = "Set the user profile settings of every user to the defaults"
+ option_list = NoArgsCommand.option_list + (
+ make_option('--update-anon-user', dest='update-anon-user',
+ default=False, action='store_true',
+ help='Update also the profile of the anonymous user'),
+ )
+
def handle_noargs(self, **options):
+ update_anon_user = 'update-anon-user' in options
for u in User.objects.all():
+ # Ignore the anonymous user by default
+ if u.id == settings.ANONYMOUS_USER_ID and not update_anon_user:
+ continue
up = u.userprofile
# Expect user profiles to be there and add all default settings
up.inverse_mouse_wheel = settings.PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL |
69e6db7a4a28ff1f50bd4f12f550a2b65f05eb38 | utils/dusk/__init__.py | utils/dusk/__init__.py | """
'Dusk' command system for Amethyst.
Based loosely (heh) off of discord.py's ext command system.
TODO: refactor arg parsing probably.
"""
from .context import Context # NOQA
from .command import * # NOQA
from .command_holder import CommandHolder # NOQA
from .constants import * # NOQA
| """
'Dusk' command system for Amethyst.
Based loosely (heh) off of discord.py's ext command system.
"""
from .context import Context # NOQA
from .command import * # NOQA
from .command_holder import CommandHolder # NOQA
from .constants import * # NOQA
__version__ = "1.0.0" | Remove obsolete TODO and add version | Remove obsolete TODO and add version
| Python | mit | awau/Amethyst,HexadecimalPython/Xeili | ---
+++
@@ -1,11 +1,11 @@
"""
'Dusk' command system for Amethyst.
Based loosely (heh) off of discord.py's ext command system.
-
-TODO: refactor arg parsing probably.
"""
from .context import Context # NOQA
from .command import * # NOQA
from .command_holder import CommandHolder # NOQA
from .constants import * # NOQA
+
+__version__ = "1.0.0" |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.