| column | type |
|---|---|
| commit | stringlengths 40-40 |
| old_file | stringlengths 4-118 |
| new_file | stringlengths 4-118 |
| old_contents | stringlengths 0-2.94k |
| new_contents | stringlengths 1-4.43k |
| subject | stringlengths 15-444 |
| message | stringlengths 16-3.45k |
| lang | stringclasses (1 value) |
| license | stringclasses (13 values) |
| repos | stringlengths 5-43.2k |
| prompt | stringlengths 17-4.58k |
| response | stringlengths 1-4.43k |
| prompt_tagged | stringlengths 58-4.62k |
| response_tagged | stringlengths 1-4.43k |
| text | stringlengths 132-7.29k |
| text_tagged | stringlengths 173-7.33k |
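Each record below lists these sixteen fields in the order given above, one cell per block with `|` lines as separators; the `prompt`, `response`, `text`, and `text_tagged` cells repeat the file contents and commit subject in plain and `<commit_before>`/`<commit_msg>`/`<commit_after>`-tagged form. As a minimal sketch of iterating over a few records (assuming the data is hosted on the Hugging Face Hub; the dataset path below is a placeholder, not a published name), the fields could be read like this:

```python
# Minimal sketch, assuming the dataset is available on the Hugging Face Hub.
# "user/commit-message-dataset" is a placeholder path, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/commit-message-dataset", split="train")

for record in ds.select(range(3)):
    # Each record pairs the pre-commit file contents with the commit subject/message
    # and the post-commit contents, plus tagged variants meant for prompt/response training.
    print(record["old_file"], "->", record["subject"])
    print(record["text_tagged"][:120])
```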
091a68fef4bc2fcaf87279d36ea3ebec87bac071
|
astropy/vo/samp/__init__.py
|
astropy/vo/samp/__init__.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP) <www.ivoa.net/samp>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP)
<http://www.ivoa.net/documents/SAMP/>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
|
Correct URL and add http in SAMP docstring
|
Correct URL and add http in SAMP docstring
|
Python
|
bsd-3-clause
|
lpsinger/astropy,lpsinger/astropy,stargaser/astropy,larrybradley/astropy,mhvk/astropy,MSeifert04/astropy,larrybradley/astropy,saimn/astropy,pllim/astropy,dhomeier/astropy,astropy/astropy,tbabej/astropy,StuartLittlefair/astropy,dhomeier/astropy,lpsinger/astropy,dhomeier/astropy,joergdietrich/astropy,tbabej/astropy,MSeifert04/astropy,StuartLittlefair/astropy,DougBurke/astropy,AustereCuriosity/astropy,stargaser/astropy,dhomeier/astropy,larrybradley/astropy,saimn/astropy,aleksandr-bakanov/astropy,astropy/astropy,tbabej/astropy,StuartLittlefair/astropy,tbabej/astropy,AustereCuriosity/astropy,pllim/astropy,saimn/astropy,pllim/astropy,StuartLittlefair/astropy,mhvk/astropy,joergdietrich/astropy,bsipocz/astropy,kelle/astropy,mhvk/astropy,AustereCuriosity/astropy,funbaker/astropy,tbabej/astropy,dhomeier/astropy,lpsinger/astropy,larrybradley/astropy,aleksandr-bakanov/astropy,MSeifert04/astropy,DougBurke/astropy,pllim/astropy,mhvk/astropy,astropy/astropy,saimn/astropy,AustereCuriosity/astropy,saimn/astropy,stargaser/astropy,funbaker/astropy,kelle/astropy,joergdietrich/astropy,kelle/astropy,DougBurke/astropy,StuartLittlefair/astropy,pllim/astropy,joergdietrich/astropy,funbaker/astropy,bsipocz/astropy,stargaser/astropy,astropy/astropy,astropy/astropy,MSeifert04/astropy,aleksandr-bakanov/astropy,bsipocz/astropy,lpsinger/astropy,joergdietrich/astropy,funbaker/astropy,kelle/astropy,mhvk/astropy,larrybradley/astropy,kelle/astropy,aleksandr-bakanov/astropy,AustereCuriosity/astropy,bsipocz/astropy,DougBurke/astropy
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP) <www.ivoa.net/samp>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
Correct URL and add http in SAMP docstring
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP)
<http://www.ivoa.net/documents/SAMP/>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
|
<commit_before># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP) <www.ivoa.net/samp>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
<commit_msg>Correct URL and add http in SAMP docstring<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP)
<http://www.ivoa.net/documents/SAMP/>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP) <www.ivoa.net/samp>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
Correct URL and add http in SAMP docstring# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP)
<http://www.ivoa.net/documents/SAMP/>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
|
<commit_before># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP) <www.ivoa.net/samp>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
<commit_msg>Correct URL and add http in SAMP docstring<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocal (SAMP)
<http://www.ivoa.net/documents/SAMP/>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from ... import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.vo.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
conf = Conf()
|
b3f206d9b8cbde42ce2def6d8b9d8c1d90abfeeb
|
pyexperiment/utils/interactive.py
|
pyexperiment/utils/interactive.py
|
"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if not 'state' in kwargs:
kwargs['state'] = state
if not 'conf' in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
|
"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if 'state' not in kwargs:
kwargs['state'] = state
if 'conf' not in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
|
Fix style: not foo in [] => foo not in
|
Fix style: not foo in [] => foo not in
|
Python
|
mit
|
duerrp/pyexperiment,kinverarity1/pyexperiment,DeercoderResearch/pyexperiment,shaunstanislaus/pyexperiment,shaunstanislaus/pyexperiment,kinverarity1/pyexperiment,kinverarity1/pyexperiment,duerrp/pyexperiment,DeercoderResearch/pyexperiment,shaunstanislaus/pyexperiment,duerrp/pyexperiment,DeercoderResearch/pyexperiment,DeercoderResearch/pyexperiment,kinverarity1/pyexperiment,shaunstanislaus/pyexperiment
|
"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if not 'state' in kwargs:
kwargs['state'] = state
if not 'conf' in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
Fix style: not foo in [] => foo not in
|
"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if 'state' not in kwargs:
kwargs['state'] = state
if 'conf' not in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
|
<commit_before>"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if not 'state' in kwargs:
kwargs['state'] = state
if not 'conf' in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
<commit_msg>Fix style: not foo in [] => foo not in<commit_after>
|
"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if 'state' not in kwargs:
kwargs['state'] = state
if 'conf' not in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
|
"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if not 'state' in kwargs:
kwargs['state'] = state
if not 'conf' in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
Fix style: not foo in [] => foo not in"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if 'state' not in kwargs:
kwargs['state'] = state
if 'conf' not in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
|
<commit_before>"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if not 'state' in kwargs:
kwargs['state'] = state
if not 'conf' in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
<commit_msg>Fix style: not foo in [] => foo not in<commit_after>"""Provides helper functions for interactive prompts
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from pyexperiment import state
from pyexperiment import conf
def embed_interactive(**kwargs):
"""Embed an interactive terminal into a running python process
"""
if 'state' not in kwargs:
kwargs['state'] = state
if 'conf' not in kwargs:
kwargs['conf'] = conf
try:
import IPython
ipython_config = IPython.Config()
ipython_config.TerminalInteractiveShell.confirm_exit = False
if IPython.__version__ == '1.2.1':
IPython.embed(config=ipython_config,
banner1='',
user_ns=kwargs)
else:
IPython.embed(config=ipython_config,
banner1='',
local_ns=kwargs)
except ImportError:
import readline # pylint: disable=unused-variable
import code
code.InteractiveConsole(kwargs).interact()
|
b423d3f0da64a1d6781128653bd6230ac462ad85
|
ava/text_to_speech/__init__.py
|
ava/text_to_speech/__init__.py
|
import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
|
import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = os.environ['TMP'] + str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
|
Put file created by the gtts on tmp folder
|
Put file created by the gtts on tmp folder
|
Python
|
mit
|
ava-project/AVA
|
import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
Put file created by the gtts on tmp folder
|
import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = os.environ['TMP'] + str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
|
<commit_before>import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
<commit_msg>Put file created by the gtts on tmp folder<commit_after>
|
import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = os.environ['TMP'] + str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
|
import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
Put file created by the gtts on tmp folderimport time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = os.environ['TMP'] + str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
|
<commit_before>import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
<commit_msg>Put file created by the gtts on tmp folder<commit_after>import time
import os
from tempfile import NamedTemporaryFile
from sys import platform as _platform
from gtts import gTTS
from pygame import mixer
from .playsound import playsound
from ..components import _BaseComponent
class TextToSpeech(_BaseComponent):
def __init__(self, queues):
super().__init__(queues)
self.queue_tts = None
def setup(self):
self.queue_tts = self._queues['QueueTextToSpeech']
def run(self):
while self._is_init:
sentence = self.queue_tts.get()
if sentence is None:
break
print('To say out loud : {}'.format(sentence))
tts = gTTS(text=sentence, lang='en')
if _platform == "darwin":
with NamedTemporaryFile() as audio_file:
tts.write_to_fp(audio_file)
audio_file.seek(0)
playsound(audio_file.name)
else:
filename = os.environ['TMP'] + str(time.time()).split('.')[0] + ".mp3"
tts.save(filename)
if _platform == "linux" or _platform == "linux2":
mixer.init()
mixer.music.load(filename)
mixer.music.play()
else:
playsound(filename)
os.remove(filename)
self.queue_tts.task_done()
def stop(self):
print('Stopping {0}...'.format(self.__class__.__name__))
self._is_init = False
self.queue_tts.put(None)
|
608824b396c75c4c82579133d2291eab5491fab9
|
src/odin/fields/future.py
|
src/odin/fields/future.py
|
from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField", )
ET = TypeVar('ET', Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (ET, **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages['invalid_choice'] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
|
from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any, Type # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField",)
ET = TypeVar("ET", Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (Type[ET], **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages["invalid_choice"] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
|
Use Type[ET] as enum passed to init is a Type
|
Use Type[ET] as enum passed to init is a Type
|
Python
|
bsd-3-clause
|
python-odin/odin
|
from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField", )
ET = TypeVar('ET', Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (ET, **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages['invalid_choice'] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
Use Type[ET] as enum passed to init is a Type
|
from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any, Type # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField",)
ET = TypeVar("ET", Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (Type[ET], **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages["invalid_choice"] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
|
<commit_before>from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField", )
ET = TypeVar('ET', Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (ET, **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages['invalid_choice'] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
<commit_msg>Use Type[ET] as enum passed to init is a Type<commit_after>
|
from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any, Type # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField",)
ET = TypeVar("ET", Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (Type[ET], **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages["invalid_choice"] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
|
from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField", )
ET = TypeVar('ET', Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (ET, **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages['invalid_choice'] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
Use Type[ET] as enum passed to init is a Typefrom __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any, Type # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField",)
ET = TypeVar("ET", Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (Type[ET], **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages["invalid_choice"] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
|
<commit_before>from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField", )
ET = TypeVar('ET', Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (ET, **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages['invalid_choice'] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
<commit_msg>Use Type[ET] as enum passed to init is a Type<commit_after>from __future__ import absolute_import
from enum import Enum
from typing import TypeVar, Optional, Any, Type # noqa
from odin.exceptions import ValidationError
from . import Field
__all__ = ("EnumField",)
ET = TypeVar("ET", Enum, Enum)
class EnumField(Field):
"""
Field for handling Python enums.
"""
data_type_name = "Enum"
def __init__(self, enum, **options):
# type: (Type[ET], **Any) -> None
# Generate choices structure from choices
choices = options.pop("choices", None)
options["choices"] = tuple((e, e.name) for e in choices or enum)
super(EnumField, self).__init__(**options)
self.enum = enum
@property
def choices_doc_text(self):
"""
Choices converted for documentation purposes.
"""
return tuple((v.value, n) for v, n in self.choices)
def to_python(self, value):
# type: (Any) -> Optional[ET]
if value is None:
return
# Attempt to convert
try:
return self.enum(value)
except ValueError:
# If value is an empty string return None
# Do this check here to support enums that define an option using
# an empty string.
if value is "":
return
raise ValidationError(self.error_messages["invalid_choice"] % value)
def prepare(self, value):
# type: (Optional[ET]) -> Any
if value in self.enum:
return value.value
|
9bc5ec59224116e2092f0c2e02831c8276360910
|
providers/output/terminal.py
|
providers/output/terminal.py
|
from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: data.get("filmtipset_my_grade", 0), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
|
from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: (data.get("filmtipset_my_grade", 0), data.get("imdb_rating", 0)), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
|
Sort on imdb rating if filmtipset ratings are the same.
|
Sort on imdb rating if filmtipset ratings are the same.
|
Python
|
mit
|
EmilStenstrom/nephele
|
from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: data.get("filmtipset_my_grade", 0), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
Sort on imdb rating if filmtipset ratings are the same.
|
from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: (data.get("filmtipset_my_grade", 0), data.get("imdb_rating", 0)), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
|
<commit_before>from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: data.get("filmtipset_my_grade", 0), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
<commit_msg>Sort on imdb rating if filmtipset ratings are the same.<commit_after>
|
from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: (data.get("filmtipset_my_grade", 0), data.get("imdb_rating", 0)), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
|
from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: data.get("filmtipset_my_grade", 0), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
Sort on imdb rating if filmtipset ratings are the same.from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: (data.get("filmtipset_my_grade", 0), data.get("imdb_rating", 0)), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
|
<commit_before>from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: data.get("filmtipset_my_grade", 0), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
<commit_msg>Sort on imdb rating if filmtipset ratings are the same.<commit_after>from __future__ import print_function
import textwrap
from providers.output.provider import OutputProvider
IDENTIFIER = "Terminal"
class Provider(OutputProvider):
def process_data(self, movie_data):
movie_data = filter(lambda data: data.get("filmtipset_my_grade_type", "none") != "seen", movie_data)
movie_data = sorted(movie_data, key=lambda data: (data.get("filmtipset_my_grade", 0), data.get("imdb_rating", 0)), reverse=True)
return movie_data
def output(self, movie_data):
movie_data = self.process_data(movie_data)
print()
for data in movie_data[:10]:
print("%s (Filmtipset: %s, IMDB: %s)" % (
data["title"],
data.get("filmtipset_my_grade", "-"),
data.get("imdb_rating", "-"),
))
print(" [Genre: %s, Country: %s, Year: %s]" % (
", ".join(data.get("genre", "-")),
data.get("country", "-"),
data.get("year", "-"),
))
plot = data.get("plot", None)
if plot:
text = textwrap.wrap('Plot: "' + data.get("plot", "-") + '"', width=80, initial_indent=" ", subsequent_indent=" ")
print("\n".join(text))
print()
|
53fbfc19090ce9e2447d3811ef5807422b71f426
|
indico/modules/events/registration/views.py
|
indico/modules/events/registration/views.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration'
sidemenu_option = 'registration'
def getJSFiles(self):
return WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls()
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration/'
sidemenu_option = 'registration'
def getJSFiles(self):
return (WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls() +
self._asset_env['indico_regform'].urls())
def getCSSFiles(self):
return WPConferenceModifBase.getCSSFiles(self) + self._asset_env['registrationform_sass'].urls()
|
Include static files specific to regform
|
Include static files specific to regform
|
Python
|
mit
|
DirkHoffmann/indico,OmeGak/indico,OmeGak/indico,mvidalgarcia/indico,DirkHoffmann/indico,indico/indico,mvidalgarcia/indico,ThiefMaster/indico,pferreir/indico,mic4ael/indico,DirkHoffmann/indico,pferreir/indico,mic4ael/indico,indico/indico,pferreir/indico,mvidalgarcia/indico,OmeGak/indico,pferreir/indico,mic4ael/indico,DirkHoffmann/indico,mic4ael/indico,ThiefMaster/indico,indico/indico,ThiefMaster/indico,ThiefMaster/indico,mvidalgarcia/indico,indico/indico,OmeGak/indico
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration'
sidemenu_option = 'registration'
def getJSFiles(self):
return WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls()
Include static files specific to regform
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration/'
sidemenu_option = 'registration'
def getJSFiles(self):
return (WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls() +
self._asset_env['indico_regform'].urls())
def getCSSFiles(self):
return WPConferenceModifBase.getCSSFiles(self) + self._asset_env['registrationform_sass'].urls()
|
<commit_before># This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration'
sidemenu_option = 'registration'
def getJSFiles(self):
return WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls()
<commit_msg>Include static files specific to regform<commit_after>
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration/'
sidemenu_option = 'registration'
def getJSFiles(self):
return (WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls() +
self._asset_env['indico_regform'].urls())
def getCSSFiles(self):
return WPConferenceModifBase.getCSSFiles(self) + self._asset_env['registrationform_sass'].urls()
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration'
sidemenu_option = 'registration'
def getJSFiles(self):
return WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls()
Include static files specific to regform# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration/'
sidemenu_option = 'registration'
def getJSFiles(self):
return (WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls() +
self._asset_env['indico_regform'].urls())
def getCSSFiles(self):
return WPConferenceModifBase.getCSSFiles(self) + self._asset_env['registrationform_sass'].urls()
|
<commit_before># This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration'
sidemenu_option = 'registration'
def getJSFiles(self):
return WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls()
<commit_msg>Include static files specific to regform<commit_after># This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase
class WPManageRegistration(WPJinjaMixin, WPConferenceModifBase):
template_prefix = 'events/registration/'
sidemenu_option = 'registration'
def getJSFiles(self):
return (WPConferenceModifBase.getJSFiles(self) + self._asset_env['modules_registration_js'].urls() +
self._asset_env['indico_regform'].urls())
def getCSSFiles(self):
return WPConferenceModifBase.getCSSFiles(self) + self._asset_env['registrationform_sass'].urls()
|
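The change above appends one more JS bundle and adds a getCSSFiles override, in both cases extending rather than replacing what the parent page class returns. A stripped-down sketch of that override pattern (BasePage, RegistrationPage and the file names are invented stand-ins for the Indico classes and asset bundles):

class BasePage(object):
    def getJSFiles(self):
        return ['base.js']

class RegistrationPage(BasePage):
    def getJSFiles(self):
        # Keep the parent's assets and append the module-specific bundles.
        return BasePage.getJSFiles(self) + ['modules_registration.js', 'indico_regform.js']

    def getCSSFiles(self):
        return ['registrationform.css']

print(RegistrationPage().getJSFiles())  # ['base.js', 'modules_registration.js', 'indico_regform.js']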
d239ac7241e61e35f8e9e7ce60a8a8735944028e
|
app/__init__.py
|
app/__init__.py
|
''' FAB CITY - VISUALIZAR 2016
--------------------------------------------
A web application powered by Flask and d3.js
to generate networks/datavisualisations
------------------------------------------
licence CC : BY - SA
---------------------------------------------
project by :
- FABLAB BARCELONA
- PING
developers :
- Massimo M
- Mariana Q
- Julien P
with the support of :
MediaLab Prado - Visualizar 2016
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
|
'''FAB CITY DASHBOARD - VISUALIZAR'16
--------------------------------------------
A dashboard for all the Fab Cities where citizens can understand the existing resilience of cities and how the Maker movement is having an impact on this.
------------------------------------------
license: AGPL 3.0
---------------------------------------------
A project by: IAAC | Fab Lab Barcelona - Fab City Research Lab from the Fab City Global discussions.
Proposed at Visualizar'16 at Medialab Prado: http://fablabbcn.org/news/2016/05/12/visualizar.html
Participants at Visualizar'16:
- Massimo Menichinelli (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Mariana Quintero (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Julien Paris (PING)
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
|
Update info about the project
|
Update info about the project
|
Python
|
agpl-3.0
|
rubenlorenzo/fab-city-dashboard,rubenlorenzo/fab-city-dashboard,rubenlorenzo/fab-city-dashboard
|
''' FAB CITY - VISUALIZAR 2016
--------------------------------------------
A web application powered by Flask and d3.js
to generate networks/datavisualisations
------------------------------------------
licence CC : BY - SA
---------------------------------------------
project by :
- FABLAB BARCELONA
- PING
developers :
- Massimo M
- Mariana Q
- Julien P
with the support of :
MediaLab Prado - Visualizar 2016
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
Update info about the project
|
'''FAB CITY DASHBOARD - VISUALIZAR'16
--------------------------------------------
A dashboard for all the Fab Cities where citizens can understand the existing resilience of cities and how the Maker movement is having an impact on this.
------------------------------------------
license: AGPL 3.0
---------------------------------------------
A project by: IAAC | Fab Lab Barcelona - Fab City Research Lab from the Fab City Global discussions.
Proposed at Visualizar'16 at Medialab Prado: http://fablabbcn.org/news/2016/05/12/visualizar.html
Participants at Visualizar'16:
- Massimo Menichinelli (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Mariana Quintero (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Julien Paris (PING)
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
|
<commit_before>''' FAB CITY - VISUALIZAR 2016
--------------------------------------------
A web application powered by Flask and d3.js
to generate networks/datavisualisations
------------------------------------------
licence CC : BY - SA
---------------------------------------------
project by :
- FABLAB BARCELONA
- PING
developers :
- Massimo M
- Mariana Q
- Julien P
with the support of :
MediaLab Prado - Visualizar 2016
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
<commit_msg>Update info about the project<commit_after>
|
'''FAB CITY DASHBOARD - VISUALIZAR'16
--------------------------------------------
A dashboard for all the Fab Cities where citizens can understand the existing resilience of cities and how the Maker movement is having an impact on this.
------------------------------------------
license: AGPL 3.0
---------------------------------------------
A project by: IAAC | Fab Lab Barcelona - Fab City Research Lab from the Fab City Global discussions.
Proposed at Visualizar'16 at Medialab Prado: http://fablabbcn.org/news/2016/05/12/visualizar.html
Participants at Visualizar'16:
- Massimo Menichinelli (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Mariana Quintero (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Julien Paris (PING)
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
|
''' FAB CITY - VISUALIZAR 2016
--------------------------------------------
A web application powered by Flask and d3.js
to generate networks/datavisualisations
------------------------------------------
licence CC : BY - SA
---------------------------------------------
project by :
- FABLAB BARCELONA
- PING
developers :
- Massimo M
- Mariana Q
- Julien P
with the support of :
MediaLab Prado - Visualizar 2016
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
Update info about the project'''FAB CITY DASHBOARD - VISUALIZAR'16
--------------------------------------------
A dashboard for all the Fab Cities where citizens can understand the existing resilience of cities and how the Maker movement is having an impact on this.
------------------------------------------
license: AGPL 3.0
---------------------------------------------
A project by: IAAC | Fab Lab Barcelona - Fab City Research Lab from the Fab City Global discussions.
Proposed at Visualizar'16 at Medialab Prado: http://fablabbcn.org/news/2016/05/12/visualizar.html
Participants at Visualizar'16:
- Massimo Menichinelli (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Mariana Quintero (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Julien Paris (PING)
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
|
<commit_before>''' FAB CITY - VISUALIZAR 2016
--------------------------------------------
A web application powered by Flask and d3.js
to generate networks/datavisualisations
------------------------------------------
licence CC : BY - SA
---------------------------------------------
project by :
- FABLAB BARCELONA
- PING
developers :
- Massimo M
- Mariana Q
- Julien P
with the support of :
MediaLab Prado - Visualizar 2016
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
<commit_msg>Update info about the project<commit_after>'''FAB CITY DASHBOARD - VISUALIZAR'16
--------------------------------------------
A dashboard for all the Fab Cities where citizens can understand the existing resilience of cities and how the Maker movement is having an impact on this.
------------------------------------------
license: AGPL 3.0
---------------------------------------------
A project by: IAAC | Fab Lab Barcelona - Fab City Research Lab from the Fab City Global discussions.
Proposed at Visualizar'16 at Medialab Prado: http://fablabbcn.org/news/2016/05/12/visualizar.html
Participants at Visualizar'16:
- Massimo Menichinelli (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Mariana Quintero (IAAC | Fab Lab Barcelona - Fab City Research Lab)
- Julien Paris (PING)
---------------------------------------------
'''
from flask import Flask
import os
from .scripts.app_vars import static_dir ### custom static directory
app = Flask(__name__) ### default call
#app = Flask(__name__, static_path = static_dir ) ### change static directory address to custom for Flask
from app import views
|
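The commit above only rewrites the module docstring, but the commented-out constructor call kept in the record points at Flask's old static_path argument for serving assets from a custom directory. Newer Flask releases spell the same idea static_folder/static_url_path; a small sketch under that assumption (the 'assets' directory name is hypothetical, not the project's real layout):

from flask import Flask

# Serve files from ./assets at URLs under /static instead of the default ./static folder.
app = Flask(__name__, static_folder='assets', static_url_path='/static')

@app.route('/')
def index():
    return 'ok'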
d0662f3ae97a194975e842dae2357a0a017aefe7
|
findingaids/__init__.py
|
findingaids/__init__.py
|
# file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
|
# file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, 'pre')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
|
Set version to 1.9.0-pre to indicate pre-release code
|
Set version to 1.9.0-pre to indicate pre-release code
|
Python
|
apache-2.0
|
emory-libraries/findingaids,emory-libraries/findingaids
|
# file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
Set version to 1.9.0-pre to indicate pre-release code
|
# file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, 'pre')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
|
<commit_before># file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
<commit_msg>Set version to 1.9.0-pre to indicate pre-release code<commit_after>
|
# file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, 'pre')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
|
# file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
Set version to 1.9.0-pre to indicate pre-release code# file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, 'pre')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
|
<commit_before># file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
<commit_msg>Set version to 1.9.0-pre to indicate pre-release code<commit_after># file findingaids/__init__.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version_info__ = (1, 9, 0, 'pre')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
|
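The only change in the record above is the last element of __version_info__ (None becomes 'pre'), which the two lines below it turn into either a plain or a dash-suffixed version string. The same logic pulled out as a standalone function, to show both outcomes (the function name is just for illustration):

def render_version(version_info):
    # Dot-connect all but the last element; dash-connect the last one if it is not None.
    version = '.'.join(str(i) for i in version_info[:-1])
    if version_info[-1] is not None:
        version += '-%s' % (version_info[-1],)
    return version

print(render_version((1, 9, 0, None)))   # 1.9.0
print(render_version((1, 9, 0, 'pre')))  # 1.9.0-pre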
5e231666a8c611fcac4683c33f6d92920b6b024d
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
|
#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'trollius', # remove when catkin>0.4.4 is released
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
|
Revert "Remove dependency that was fixed upstream"
|
Revert "Remove dependency that was fixed upstream"
This reverts commit 9ee219d85849629eac53a28e72fa374a6c805ea4.
|
Python
|
mit
|
Rayman/ros-get,Rayman/ros-get
|
#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
Revert "Remove dependency that was fixed upstream"
This reverts commit 9ee219d85849629eac53a28e72fa374a6c805ea4.
|
#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'trollius', # remove when catkin>0.4.4 is released
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
|
<commit_before>#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
<commit_msg>Revert "Remove dependency that was fixed upstream"
This reverts commit 9ee219d85849629eac53a28e72fa374a6c805ea4.<commit_after>
|
#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'trollius', # remove when catkin>0.4.4 is released
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
|
#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
Revert "Remove dependency that was fixed upstream"
This reverts commit 9ee219d85849629eac53a28e72fa374a6c805ea4.#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'trollius', # remove when catkin>0.4.4 is released
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
|
<commit_before>#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
<commit_msg>Revert "Remove dependency that was fixed upstream"
This reverts commit 9ee219d85849629eac53a28e72fa374a6c805ea4.<commit_after>#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages
def get_ini_variable(name):
with open(os.path.join(os.path.dirname(__file__), 'src', 'ros_get', '__init__.py')) as f:
return re.compile(r".*%s = '(.*?)'" % name, re.S).match(f.read()).group(1)
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as r_file:
readme = r_file.read()
setup(
name='ros_get',
license='MIT',
version=get_ini_variable('__version__'),
url=get_ini_variable('__url__'),
author=get_ini_variable('__author__'),
author_email=get_ini_variable('__email__'),
description='Simple tools for working with ROS source packages',
long_description=readme,
package_dir={'': 'src'}, # tell distutils packages are under src
packages=find_packages('src'), # include all packages under src
install_requires=[
'argcomplete',
'catkin_pkg',
'catkin_tools',
'colorlog',
'future',
'mock',
'rosdep',
'rosdistro >= 0.6.8',
'rosinstall_generator',
'trollius', # remove when catkin>0.4.4 is released
'vcstools',
'xdg==1.0.7',
],
entry_points={'console_scripts': ['ros-get=ros_get.__main__:main']}, )
|
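The setup.py in the record above reads __version__ and the other dunder variables out of the package's __init__.py with a regular expression instead of importing the package. The same helper applied to an in-memory string makes the matching behaviour visible (the file contents below are made up):

import re

init_py = "__version__ = '0.1.0'\n__author__ = 'Someone'\n"

def get_variable(name, text):
    # re.S lets '.*' span lines; the lazy group captures the quoted value after "name = '".
    return re.compile(r".*%s = '(.*?)'" % name, re.S).match(text).group(1)

print(get_variable('__version__', init_py))  # 0.1.0
print(get_variable('__author__', init_py))   # Someone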
8c74ae32fab10955304941ae7bf20def4718cf9e
|
setup.py
|
setup.py
|
import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.18",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
|
import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.19",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
|
Use seed as the js interpreter.
|
Use seed as the js interpreter.
|
Python
|
mit
|
chevah/pocket-lint,chevah/pocket-lint
|
import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.18",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
Use seed as the js interpreter.
|
import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.19",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
|
<commit_before>import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.18",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
<commit_msg>Use seed as the js interpreter.<commit_after>
|
import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.19",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
|
import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.18",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
Use seed as the js interpreter.import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.19",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
|
<commit_before>import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.18",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
<commit_msg>Use seed as the js interpreter.<commit_after>import subprocess
from distutils.core import setup
from distutils.command.sdist import sdist
class SignedSDistCommand(sdist):
"""Sign the source archive with a detached signature."""
description = "Sign the source archive after it is generated."
def run(self):
sdist.run(self)
gpg_args = [
'gpg', '--armor', '--sign', '--detach-sig', self.archive_files[0]]
gpg = subprocess.Popen(
gpg_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg.communicate()
setup(
name="pocketlint",
description="Pocket-lint a composite linter and style checker.",
version="0.5.19",
maintainer="Curtis C. Hovey",
maintainer_email="sinzui.is@verizon.net",
url="https://launchpad.net/pocket-lint",
packages=[
'pocketlint', 'pocketlint/contrib', 'pocketlint/contrib/pyflakes'],
package_dir={
'pocketlint': 'pocketlint',
'pocketlint/contrib': 'pocketlint/contrib'},
package_data={
'pocketlint': ['jsreporter.js'],
'pocketlint/contrib': ['fulljslint.js'],
},
scripts=['scripts/pocketlint'],
cmdclass={
'signed_sdist': SignedSDistCommand,
},
)
|
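The pocketlint record above registers a cmdclass whose command subclasses sdist and signs the generated archive with gpg once the build finishes. The same hook works for any post-processing tool; a sketch of the pattern (the checksum tool and command name are placeholders, and setuptools' sdist is imported here where the record uses the distutils original):

import subprocess
from setuptools.command.sdist import sdist

class PostProcessedSDist(sdist):
    """Build the source archive, then run an external tool on it."""
    description = "Create the sdist and post-process the resulting archive."

    def run(self):
        sdist.run(self)                     # build the source archive(s) first
        for archive in self.archive_files:  # same attribute the record's command reads
            subprocess.check_call(['sha256sum', archive])

# Registered the same way as the record's signed_sdist command:
# setup(..., cmdclass={'post_sdist': PostProcessedSDist})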
bcaedd4b1956a849443a9bb535f124b013ab2120
|
setup.py
|
setup.py
|
from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('auth_urls',),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
|
from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('backends', 'middleware', 'urls'),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
|
Add backends and middleware to the package
|
Add backends and middleware to the package
|
Python
|
bsd-2-clause
|
incuna/incuna-auth,incuna/incuna-auth,ghickman/incuna-auth,ghickman/incuna-auth
|
from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('auth_urls',),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
Add backends and middleware to the package
|
from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('backends', 'middleware', 'urls'),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
|
<commit_before>from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('auth_urls',),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
<commit_msg>Add backends and middleware to the package<commit_after>
|
from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('backends', 'middleware', 'urls'),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
|
from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('auth_urls',),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
Add backends and middleware to the packagefrom setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('backends', 'middleware', 'urls'),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
|
<commit_before>from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('auth_urls',),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
<commit_msg>Add backends and middleware to the package<commit_after>from setuptools import setup
install_requires = ('django-admin-sso',)
setup(
name='incuna-auth-urls',
version='0.1',
url='http://github.com/incuna/incuna-auth-urls',
py_modules=('backends', 'middleware', 'urls'),
include_package_data=True,
install_requires=install_requires,
description='Provides authentication parts.',
long_description=open('README.rst').read(),
author='Incuna Ltd',
author_email='admin@incuna.com',
)
|
44a2678526ee7f5bc897969ade7f00ce72e7e3a6
|
setup.py
|
setup.py
|
import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
setup(
name='pelican_bibtex',
description='Manage your academic publications page with Pelican and BibTeX',
long_description=open('Readme.md').read(),
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
license='Public Domain'
)
|
import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: Public Domain
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
LONG_DESCRIPTION = """\
Requirements
============
pelican\_bibtex requires pybtex.
This plugin reads a user-specified BibTeX file and populates the context with
a list of publications, ready to be used in your Jinja2 template.
If the file is present and readable, you will be able to find the 'publications'
variable in all templates. It is a list of tuples with the following fields:
(key, text, bibtex, pdf, slides, poster)
1. key is the BibTeX key (identifier) of the entry.
2. text is the HTML formatted entry, generated by pybtex.
3. bibtex is a string containing BibTeX code for the entry, useful to make it
available to people who want to cite your work.
4. pdf, slides, poster: in your BibTeX file, you can add these special fields
"""
setup(
name='pelican_bibtex',
description='Organize your scientific publications with BibTeX in Pelican',
long_description=LONG_DESCRIPTION,
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f]
)
|
Fix Markdown bug and correct license information
|
Fix Markdown bug and correct license information
|
Python
|
unlicense
|
perror/pelican-bibtex,vene/pelican-bibtex,trovao/pelican-bibtex,anagno/pelican-bibtex
|
import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
setup(
name='pelican_bibtex',
description='Manage your academic publications page with Pelican and BibTeX',
long_description=open('Readme.md').read(),
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
license='Public Domain'
)Fix Markdown bug and correct license information
|
import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: Public Domain
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
LONG_DESCRIPTION = """\
Requirements
============
pelican\_bibtex requires pybtex.
This plugin reads a user-specified BibTeX file and populates the context with
a list of publications, ready to be used in your Jinja2 template.
If the file is present and readable, you will be able to find the 'publications'
variable in all templates. It is a list of tuples with the following fields:
(key, text, bibtex, pdf, slides, poster)
1. key is the BibTeX key (identifier) of the entry.
2. text is the HTML formatted entry, generated by pybtex.
3. bibtex is a string containing BibTeX code for the entry, useful to make it
available to people who want to cite your work.
4. pdf, slides, poster: in your BibTeX file, you can add these special fields
"""
setup(
name='pelican_bibtex',
description='Organize your scientific publications with BibTeX in Pelican',
long_description=LONG_DESCRIPTION,
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f]
)
|
<commit_before>import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
setup(
name='pelican_bibtex',
description='Manage your academic publications page with Pelican and BibTeX',
long_description=open('Readme.md').read(),
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
license='Public Domain'
)<commit_msg>Fix Markdown bug and correct license information<commit_after>
|
import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: Public Domain
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
LONG_DESCRIPTION = """\
Requirements
============
pelican\_bibtex requires pybtex.
This plugin reads a user-specified BibTeX file and populates the context with
a list of publications, ready to be used in your Jinja2 template.
If the file is present and readable, you will be able to find the 'publications'
variable in all templates. It is a list of tuples with the following fields:
(key, text, bibtex, pdf, slides, poster)
1. key is the BibTeX key (identifier) of the entry.
2. text is the HTML formatted entry, generated by pybtex.
3. bibtex is a string containing BibTeX code for the entry, useful to make it
available to people who want to cite your work.
4. pdf, slides, poster: in your BibTeX file, you can add these special fields
"""
setup(
name='pelican_bibtex',
description='Organize your scientific publications with BibTeX in Pelican',
long_description=LONG_DESCRIPTION,
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f]
)
|
import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
setup(
name='pelican_bibtex',
description='Manage your academic publications page with Pelican and BibTeX',
long_description=open('Readme.md').read(),
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
license='Public Domain'
)Fix Markdown bug and correct license informationimport pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: Public Domain
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
LONG_DESCRIPTION = """\
Requirements
============
pelican\_bibtex requires pybtex.
This plugin reads a user-specified BibTeX file and populates the context with
a list of publications, ready to be used in your Jinja2 template.
If the file is present and readable, you will be able to find the 'publications'
variable in all templates. It is a list of tuples with the following fields:
(key, text, bibtex, pdf, slides, poster)
1. key is the BibTeX key (identifier) of the entry.
2. text is the HTML formatted entry, generated by pybtex.
3. bibtex is a string containing BibTeX code for the entry, useful to make it
available to people who want to cite your work.
4. pdf, slides, poster: in your BibTeX file, you can add these special fields
"""
setup(
name='pelican_bibtex',
description='Organize your scientific publications with BibTeX in Pelican',
long_description=LONG_DESCRIPTION,
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f]
)
|
<commit_before>import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
setup(
name='pelican_bibtex',
description='Manage your academic publications page with Pelican and BibTeX',
long_description=open('Readme.md').read(),
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
license='Public Domain'
)<commit_msg>Fix Markdown bug and correct license information<commit_after>import pelican_bibtex
from distutils.core import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: Public Domain
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Operating System :: POSIX
Operating System :: Unix
"""
LONG_DESCRIPTION = """\
Requirements
============
pelican\_bibtex requires pybtex.
This plugin reads a user-specified BibTeX file and populates the context with
a list of publications, ready to be used in your Jinja2 template.
If the file is present and readable, you will be able to find the 'publications'
variable in all templates. It is a list of tuples with the following fields:
(key, text, bibtex, pdf, slides, poster)
1. key is the BibTeX key (identifier) of the entry.
2. text is the HTML formatted entry, generated by pybtex.
3. bibtex is a string containing BibTeX code for the entry, useful to make it
available to people who want to cite your work.
4. pdf, slides, poster: in your BibTeX file, you can add these special fields
"""
setup(
name='pelican_bibtex',
description='Organize your scientific publications with BibTeX in Pelican',
long_description=LONG_DESCRIPTION,
version=pelican_bibtex.__version__,
author='Vlad Niculae',
author_email='vlad@vene.ro',
url='https://pypi.python.org/pypi/pelican_bibtex',
py_modules=['pelican_bibtex'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f]
)
|
d3496513274d76941d328f5a7dfd4f8561149039
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.8.0', 'python-dateutil']
setup(
name='readability-api',
version='0.2.5',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages= ['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.9.1', 'python-dateutil']
setup(
name='readability-api',
version='0.2.6',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages=['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
|
Update httplib2 version, bump version, fix formatting
|
Update httplib2 version, bump version, fix formatting
|
Python
|
mit
|
arc90/python-readability-api,arc90/python-readability-api
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.8.0', 'python-dateutil']
setup(
name='readability-api',
version='0.2.5',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages= ['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
Update httplib2 version, bump version, fix formatting
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.9.1', 'python-dateutil']
setup(
name='readability-api',
version='0.2.6',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages=['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.8.0', 'python-dateutil']
setup(
name='readability-api',
version='0.2.5',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages= ['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
<commit_msg>Update httplib2 version, bump version, fix formatting<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.9.1', 'python-dateutil']
setup(
name='readability-api',
version='0.2.6',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages=['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.8.0', 'python-dateutil']
setup(
name='readability-api',
version='0.2.5',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages= ['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
Update httplib2 version, bump version, fix formatting#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.9.1', 'python-dateutil']
setup(
name='readability-api',
version='0.2.6',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages=['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.8.0', 'python-dateutil']
setup(
name='readability-api',
version='0.2.5',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages= ['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
<commit_msg>Update httplib2 version, bump version, fix formatting<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
required = ['oauth2', 'httplib2==0.9.1', 'python-dateutil']
setup(
name='readability-api',
version='0.2.6',
description='Python wrapper for the Readability API.',
long_description=open('README.rst').read(),
author='The Readability Team',
author_email='feedback@readability.com',
url='https://www.readability.com/publishers/api',
packages=['readability'],
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3.0',
# 'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
),
)
|
fdad6ecf70c43a7c0100ab75784bc69befa29965
|
setup.py
|
setup.py
|
from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
|
from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README.md').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
|
Fix the README file opening
|
Fix the README file opening
|
Python
|
bsd-2-clause
|
uthcode/sphinxcontrib-runcode
|
from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
Fix the README file opening
|
from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README.md').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
|
<commit_before>from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
<commit_msg>Fix the README file opening<commit_after>
|
from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README.md').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
|
from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
Fix the README file openingfrom setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README.md').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
|
<commit_before>from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
<commit_msg>Fix the README file opening<commit_after>from setuptools import setup
NAME = "sphinxcontrib-runcode"
VERSION = "0.0.1"
DESCRIPTION = "Post included code in an executable pastebin like codepad / ideone."
LONG_DESCRIPTION = open('README.md').read()
AUTHOR = "Senthil Kumaran (Uthcode)"
AUTHOR_EMAIL = "senthil@uthcode.com"
LICENSE = "BSD"
URL = "http://github.com/uthcode/sphinxcontrib-runcode"
DOWNLOAD_URL = "https://github.com/uthcode/sphinxcontrib-runcode/tree/downloads/packages"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Text Processing',
]
PLATFORMS = 'any'
REQUIRES = ['Sphinx']
PACKAGES = ['sphinxcontrib']
ZIP_SAFE = False
INCLUDE_PACKAGE_DATA = True
NAMESPACE_PACKAGES = ['sphinxcontrib']
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
requires=REQUIRES,
packages=PACKAGES,
zip_safe=ZIP_SAFE,
include_package_data=INCLUDE_PACKAGE_DATA,
namespace_packages=NAMESPACE_PACKAGES)
|
e06f9dc9b20c7499690f2a2d3d0506b2e84fa3b9
|
setup.py
|
setup.py
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.2",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.3",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.migrations',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
Include migrations in package and bump version to v0.3
|
Include migrations in package and bump version to v0.3
|
Python
|
mit
|
yunojuno/django-trello-webhooks,yunojuno/django-trello-webhooks
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.2",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
Include migrations in package and bump version to v0.3
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.3",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.migrations',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
<commit_before>import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.2",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
<commit_msg>Include migrations in package and bump version to v0.3<commit_after>
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.3",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.migrations',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.2",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
Include migrations in package and bump version to v0.3import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.3",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.migrations',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
<commit_before>import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.2",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
<commit_msg>Include migrations in package and bump version to v0.3<commit_after>import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="django-trello-webhooks",
version="0.3",
packages=[
'trello_webhooks',
'trello_webhooks.management',
'trello_webhooks.management.commands',
'trello_webhooks.migrations',
'trello_webhooks.templatetags',
'trello_webhooks.tests',
],
install_requires=['django>=1.7.1'],
include_package_data=True,
description='Django Trello Webhooks - Trello callback integration for Django.',
long_description=README,
url='https://github.com/yunojuno/django-trello-webhooks',
author='Hugo Rodger-Brown',
author_email='hugo@yunojuno.com',
maintainer='Hugo Rodger-Brown',
maintainer_email='hugo@yunojuno.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
22e7c62454b9db88fbc8f4df33fe94aade1385d8
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2, <3",
"lxml>=2.3, <4.0",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
|
#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2",
"lxml>=2.3",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
|
Remove "less-than" restrictions on Astropy, LXML.
|
Remove "less-than" restrictions on Astropy, LXML.
I think I put these in place before I had Travis-CI cron-jobs available.
Therefore wanted to avoid future unknowns. Now at least an email gets sent
when there's a new release and it breaks something.
|
Python
|
bsd-2-clause
|
timstaley/voevent-parse,timstaley/voevent-parse
|
#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2, <3",
"lxml>=2.3, <4.0",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
Remove "less-than" restrictions on Astropy, LXML.
I think I put these in place before I had Travis-CI cron-jobs available.
Therefore wanted to avoid future unknowns. Now at least an email gets sent
when there's a new release and it breaks something.
|
#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2",
"lxml>=2.3",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
|
<commit_before>#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2, <3",
"lxml>=2.3, <4.0",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
<commit_msg>Remove "less-than" restrictions on Astropy, LXML.
I think I put these in place before I had Travis-CI cron-jobs available.
Therefore wanted to avoid future unknowns. Now at least an email gets sent
when there's a new release and it breaks something.<commit_after>
|
#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2",
"lxml>=2.3",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
|
#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2, <3",
"lxml>=2.3, <4.0",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
Remove "less-than" restrictions on Astropy, LXML.
I think I put these in place before I had Travis-CI cron-jobs available.
Therefore wanted to avoid future unknowns. Now at least an email gets sent
when there's a new release and it breaks something.#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2",
"lxml>=2.3",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
|
<commit_before>#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2, <3",
"lxml>=2.3, <4.0",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
<commit_msg>Remove "less-than" restrictions on Astropy, LXML.
I think I put these in place before I had Travis-CI cron-jobs available.
Therefore wanted to avoid future unknowns. Now at least an email gets sent
when there's a new release and it breaks something.<commit_after>#!/usr/bin/env python
from setuptools import find_packages, setup
import versioneer
install_requires = [
"astropy>=1.2",
"lxml>=2.3",
'iso8601',
'orderedmultidict',
'pytz',
'six',
]
test_requires = [
'pytest>3',
'coverage'
]
extras_require = {
'test': test_requires,
'all': test_requires,
}
setup(
name="voevent-parse",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
package_data={'voeventparse': ['fixtures/*.xml']},
description="Convenience routines for parsing and manipulation of "
"VOEvent XML packets.",
author="Tim Staley",
author_email="github@timstaley.co.uk",
url="https://github.com/timstaley/voevent-parse",
install_requires=install_requires,
extras_require=extras_require,
)
|
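The voevent-parse record above replaces capped specifiers such as "astropy>=1.2, <3" with open-ended minimums. As a minimal sketch of how PEP 440 specifier sets behave, the snippet below uses the third-party `packaging` library; the version numbers are arbitrary illustrations, not values taken from the record.
# Illustrative only: how a capped and an uncapped specifier treat the same versions.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

capped = SpecifierSet(">=1.2,<3")      # old style: upper bound blocks new major releases
open_ended = SpecifierSet(">=1.2")     # new style: any future release is accepted

for candidate in ("1.2", "2.0.16", "3.0"):
    version = Version(candidate)
    print(candidate, version in capped, version in open_ended)
# "3.0" satisfies only the open-ended set; dropping the cap is what lets a new
# major release install, with CI cron builds left to flag any breakage.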
d45c14c1ee3275212535a98db161a0dbd23ed292
|
src/hue/BridgeScanner.py
|
src/hue/BridgeScanner.py
|
__author__ = 'hira'
|
import requests
import json
def get_bridge_ips():
res = requests.get('http://www.meethue.com/api/nupnp').text
data = json.loads(res)
return [map['internalipaddress'] for map in data]
print(get_bridge_ips())
|
Enable finding Hue bridge on network.
|
Enable finding Hue bridge on network.
|
Python
|
mit
|
almichest/hue_app,almichest/hue_app
|
__author__ = 'hira'
Enable finding Hue bridge on network.
|
import requests
import json
def get_bridge_ips():
res = requests.get('http://www.meethue.com/api/nupnp').text
data = json.loads(res)
return [map['internalipaddress'] for map in data]
print(get_bridge_ips())
|
<commit_before>__author__ = 'hira'
<commit_msg>Enable finding Hue bridge on network.<commit_after>
|
import requests
import json
def get_bridge_ips():
res = requests.get('http://www.meethue.com/api/nupnp').text
data = json.loads(res)
return [map['internalipaddress'] for map in data]
print(get_bridge_ips())
|
__author__ = 'hira'
Enable finding Hue bridge on network.
import requests
import json
def get_bridge_ips():
res = requests.get('http://www.meethue.com/api/nupnp').text
data = json.loads(res)
return [map['internalipaddress'] for map in data]
print(get_bridge_ips())
|
<commit_before>__author__ = 'hira'
<commit_msg>Enable finding Hue bridge on network.<commit_after>import requests
import json
def get_bridge_ips():
res = requests.get('http://www.meethue.com/api/nupnp').text
data = json.loads(res)
return [map['internalipaddress'] for map in data]
print(get_bridge_ips())
|
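The BridgeScanner above fetches the N-UPnP listing and decodes it with json.loads; requests can also decode the body itself. A sketch of that variant follows, assuming the endpoint from the record still answers with the same JSON shape.
# Sketch: same discovery call, using Response.json() instead of json.loads().
import requests

def get_bridge_ips():
    response = requests.get('http://www.meethue.com/api/nupnp')
    response.raise_for_status()  # surface HTTP errors instead of parsing an error page
    return [entry['internalipaddress'] for entry in response.json()]

if __name__ == '__main__':
    print(get_bridge_ips())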
ce1282f970276c21e020dca897ba138f07089772
|
setup.py
|
setup.py
|
# Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
]
)
|
# Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
'libvirt-python',
]
)
|
Add missing requirement for libvirt-python
|
Add missing requirement for libvirt-python
libvirt-python is missing from setup.py
Change-Id: I41c2e29d612ba0b45f94c2340b9a6a3472d5bbdc
Closes-bug: #1385439
|
Python
|
apache-2.0
|
stackforge/fuel-devops,stackforge/fuel-devops
|
# Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
]
)
Add missing requirement for libvirt-python
libvirt-python is missing from setup.py
Change-Id: I41c2e29d612ba0b45f94c2340b9a6a3472d5bbdc
Closes-bug: #1385439
|
# Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
'libvirt-python',
]
)
|
<commit_before># Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
]
)
<commit_msg>Add missing requirement for libvirt-python
libvirt-python is missing from setup.py
Change-Id: I41c2e29d612ba0b45f94c2340b9a6a3472d5bbdc
Closes-bug: #1385439<commit_after>
|
# Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
'libvirt-python',
]
)
|
# Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
]
)
Add missing requirement for libvirt-python
libvirt-python is missing from setup.py
Change-Id: I41c2e29d612ba0b45f94c2340b9a6a3472d5bbdc
Closes-bug: #1385439
# Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
'libvirt-python',
]
)
|
<commit_before># Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
]
)
<commit_msg>Add missing requirement for libvirt-python
libvirt-python is missing from setup.py
Change-Id: I41c2e29d612ba0b45f94c2340b9a6a3472d5bbdc
Closes-bug: #1385439<commit_after># Copyright 2013 - 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import find_packages
from setuptools import setup
setup(
name='devops',
version='2.5.2',
description='Library for creating and manipulating virtual environments',
author='Mirantis, Inc.',
author_email='product@mirantis.com',
url='http://mirantis.com',
keywords='devops virtual environment mirantis',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
data_files=[
(os.path.expanduser('~/.devops'), ['devops/log.yaml']),
(os.path.expanduser('~/.devops/log'), [])],
scripts=['bin/dos.py'],
install_requires=[
'xmlbuilder',
'ipaddr',
'paramiko',
'django<1.7',
'psycopg2',
'south',
'PyYAML',
'mock',
'libvirt-python',
]
)
|
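The fuel-devops fix above declares libvirt-python so pip installs it alongside the package. Because the distribution name (libvirt-python) differs from the import name (libvirt), a runtime check can make a missing-dependency error clearer; the helper below is a hypothetical sketch, not part of the project.
# Hypothetical helper: import a module, naming the pip distribution on failure.
import importlib

def require(module_name, dist_name):
    """Import module_name, or raise an error naming the pip distribution to add."""
    try:
        return importlib.import_module(module_name)
    except ImportError as exc:
        raise ImportError(
            "%r is not importable; add %r to install_requires" % (module_name, dist_name)
        ) from exc

# libvirt-python installs the `libvirt` module, hence the two different names here.
try:
    libvirt = require("libvirt", "libvirt-python")
except ImportError as err:
    print(err)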
01db313ac676fed8a94862c5fbe38e07615f18d6
|
setup.py
|
setup.py
|
from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
|
from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7 3.8'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
|
Add 3.8 to the list of supported versions
|
Add 3.8 to the list of supported versions
|
Python
|
apache-2.0
|
browncoat-ninjas/nimoy
|
from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
Add 3.8 to the list of supported versions
|
from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7 3.8'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
|
<commit_before>from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
<commit_msg>Add 3.8 to the list of supported versions<commit_after>
|
from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7 3.8'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
|
from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
Add 3.8 to the list of supported versions
from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7 3.8'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
|
<commit_before>from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
<commit_msg>Add 3.8 to the list of supported versions<commit_after>from setuptools import setup
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
] + [
('Programming Language :: Python :: %s' % x) for x in '3 3.3 3.4 3.5 3.6 3.7 3.8'.split()
]
with open('README.rst') as read_me:
long_description = read_me.read()
setup(
name='nimoy-framework',
version='0.0.1b7',
description='A testing and specification framework for Python 3, heavily inspired by the Spock Framework',
long_description=long_description,
url='https://github.com/browncoat-ninjas/nimoy',
license='Apache License',
platforms=['unix', 'linux', 'osx'],
author='Noam Tenne, Yoav Luft',
author_email='noam@10ne.org',
entry_points={'console_scripts': ['nimoy = nimoy.main:main']},
classifiers=classifiers,
keywords="test unittest specification",
packages=['nimoy', 'nimoy.assertions', 'nimoy.ast_tools', 'nimoy.context', 'nimoy.runner', 'nimoy.compare'],
install_requires=['pyhamcrest==1.9.0'],
)
|
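The nimoy setup above derives its per-version trove classifiers from a single whitespace-separated string, so supporting 3.8 is a one-token edit. The same pattern in isolation, with the version list purely illustrative:
# Illustrative: build "Programming Language" classifiers from one version string.
supported = '3 3.3 3.4 3.5 3.6 3.7 3.8'.split()
classifiers = ['Programming Language :: Python :: %s' % v for v in supported]
print('\n'.join(classifiers))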
7ed32044c0e122529d3a2f3fd709e50a919c0531
|
setup.py
|
setup.py
|
from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
|
from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
|
Set Development Status to Beta
|
Set Development Status to Beta
|
Python
|
lgpl-2.1
|
elapouya/NoAttr
|
from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
Set Development Status to Beta
|
from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
|
<commit_before>from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
<commit_msg>Set Development Status to Beta<commit_after>
|
from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
|
from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
Set Development Status to Beta
from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
|
<commit_before>from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
<commit_msg>Set Development Status to Beta<commit_after>from setuptools import setup
import os
def read(*names):
values = dict()
for name in names:
filename = name + '.rst'
if os.path.isfile(filename):
fd = open(filename)
value = fd.read()
fd.close()
else:
value = ''
values[name] = value
return values
long_description = """
%(README)s
News
====
%(CHANGES)s
""" % read('README', 'CHANGES')
setup(name='noattr',
version='0.0.1',
description='Python text operations module',
long_description=long_description,
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
],
keywords='attribute, AttrDict',
url='https://github.com/elapouya/noattr',
author='Eric Lapouyade',
author_email='elapouya@gmail.com',
license='LGPL 2.1',
packages=['noattr'],
install_requires = [],
eager_resources = [],
zip_safe=False)
|
9d972bfa792c08d4f3ce6a6b7cfd9877f801b5e3
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
|
#!/usr/bin/env python
from setuptools import setup
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
|
Use content or README.md as long_description
|
Use content or README.md as long_description
|
Python
|
mit
|
vilcans/screenplain,vilcans/screenplain,vilcans/screenplain
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
Use content or README.md as long_description
|
#!/usr/bin/env python
from setuptools import setup
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
<commit_msg>Use content or README.md as long_description<commit_after>
|
#!/usr/bin/env python
from setuptools import setup
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
Use content or README.md as long_description
#!/usr/bin/env python
from setuptools import setup
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
<commit_msg>Use content or README.md as long_description<commit_after>#!/usr/bin/env python
from setuptools import setup
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='screenplain',
version='0.9.0',
description='Convert text file to viewable screenplay.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Martin Vilcans',
author_email='screenplain@librador.com',
url='http://www.screenplain.com/',
project_urls={
'Web Page': 'http://www.screenplain.com/',
'Source': 'https://github.com/vilcans/screenplain',
},
license='MIT',
install_requires=[
],
extras_require={
'PDF': 'reportlab'
},
packages=[
'screenplain',
'screenplain.export',
'screenplain.parsers',
],
package_data={
'screenplain.export': ['default.css']
},
scripts=[
'bin/screenplain'
],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
|
9678a6cf5cb9431419f4f404ec07fc9d4091cbde
|
setup.py
|
setup.py
|
from setuptools import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
|
from distutils.core import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
|
Use distutils to avoid external dependencies
|
Use distutils to avoid external dependencies
|
Python
|
bsd-2-clause
|
dcwatson/bbcode
|
from setuptools import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
Use distutils to avoid external dependencies
|
from distutils.core import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
|
<commit_before>from setuptools import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
<commit_msg>Use distutils to avoid external dependencies<commit_after>
|
from distutils.core import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
|
from setuptools import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
Use distutils to avoid external dependencies
from distutils.core import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
|
<commit_before>from setuptools import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
<commit_msg>Use distutils to avoid external dependencies<commit_after>from distutils.core import setup
import sys
setup(
name='bbcode',
version='1.0.5',
description='A pure python bbcode parser and formatter.',
author='Dan Watson',
author_email='dcwatson@gmail.com',
url='https://bitbucket.org/dcwatson/bbcode',
license='BSD',
py_modules=['bbcode'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
'Topic :: Text Processing :: Markup',
]
)
|
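The bbcode commit above imports setup from the standard-library distutils.core so building the module needs no third-party packages. A related, widely used pattern (a sketch, and explicitly not what this commit does) prefers setuptools when present and falls back to distutils otherwise:
# Sketch of the guarded-import pattern: setuptools if available, distutils otherwise.
try:
    from setuptools import setup  # extra features (wheels, entry points) when installed
except ImportError:
    from distutils.core import setup  # stdlib-only fallback

setup(
    name='example-module',   # hypothetical metadata, for illustration only
    version='0.1.0',
    py_modules=['example'],
)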
86774ca5e2a5f31642a4230e9060e38e278a7f39
|
setup.py
|
setup.py
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect >= 0.0.5",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
|
Add lower version bound on `"oedialect"` dependency
|
Add lower version bound on `"oedialect"` dependency
For some reason readthedocs use an earlier version and fails when trying
to use it.
|
Python
|
mit
|
oemof/feedinlib
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
Add lower version bound on `"oedialect"` dependency
For some reason readthedocs use an earlier version and fails when trying
to use it.
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect >= 0.0.5",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
|
<commit_before>import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
<commit_msg>Add lower version bound on `"oedialect"` dependency
For some reason readthedocs use an earlier version and fails when trying
to use it.<commit_after>
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect >= 0.0.5",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
Add lower version bound on `"oedialect"` dependency
For some reason readthedocs uses an earlier version and fails when trying
to use it.import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect >= 0.0.5",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
|
<commit_before>import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
<commit_msg>Add lower version bound on `"oedialect"` dependency
For some reason readthedocs uses an earlier version and fails when trying
to use it.<commit_after>import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="feedinlib",
version="0.1.0rc1",
description="Creating time series from pv or wind power plants.",
url="http://github.com/oemof/feedinlib",
author="oemof developer group",
author_email="windpowerlib@rl-institut.de",
license="MIT",
packages=["feedinlib"],
long_description=read("README.rst"),
long_description_content_type="text/x-rst",
zip_safe=False,
install_requires=[
"cdsapi >= 0.1.4",
"numpy >= 1.7.0",
"oedialect >= 0.0.5",
"open_FRED-cli",
"pandas >= 0.13.1",
"pvlib >= 0.6.0",
"windpowerlib >= 0.2.0",
"xarray >= 0.12.0",
],
extras_require={
"dev": ["jupyter", "pytest", "shapely", "sphinx_rtd_theme"],
"examples": ["jupyter", "shapely"],
},
)
|
4f31a839906d7c48473281bba4f1c628619a5972
|
setup.py
|
setup.py
|
import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.1',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
|
import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.2',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
|
Upgrade dependency appdirs to ==1.4.2
|
Upgrade dependency appdirs to ==1.4.2
|
Python
|
mit
|
renanivo/with
|
import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.1',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
Upgrade dependency appdirs to ==1.4.2
|
import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.2',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
|
<commit_before>import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.1',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
<commit_msg>Upgrade dependency appdirs to ==1.4.2<commit_after>
|
import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.2',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
|
import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.1',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
Upgrade dependency appdirs to ==1.4.2import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.2',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
|
<commit_before>import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.1',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
<commit_msg>Upgrade dependency appdirs to ==1.4.2<commit_after>import os
from setuptools import setup
from withtool import __version__
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, encoding='utf-8') as f:
return f.read()
setup(
name='with',
version=__version__,
description='A shell context manager',
long_description=read('README.rst'),
author='Renan Ivo',
author_email='renanivom@gmail.com',
url='https://github.com/renanivo/with',
keywords='context manager shell command line repl',
scripts=['bin/with'],
install_requires=[
'appdirs==1.4.2',
'docopt==0.6.2',
'prompt-toolkit==1.0',
'python-slugify==1.2.1',
],
packages=['withtool'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
|
4de541e9832bc45b9e29a6eec0a5d03858fc5b71
|
setup.py
|
setup.py
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0", "Pygments>=1.0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
|
Make sure the latest pygments is used
|
Make sure the latest pygments is used
|
Python
|
bsd-3-clause
|
Pylons/kai,Pylons/kai
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
Make sure the latest pygments is used
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0", "Pygments>=1.0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
|
<commit_before>try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
<commit_msg>Make sure the latest pygments is used<commit_after>
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0", "Pygments>=1.0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
Make sure the latest pygments is usedtry:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0", "Pygments>=1.0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
|
<commit_before>try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
<commit_msg>Make sure the latest pygments is used<commit_after>try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='kai',
version='0.1',
description='',
author='Ben Bangert',
author_email='ben@groovie.org',
install_requires=[
"Pylons>=0.9.7rc4", "CouchDB>=0.4", "python-openid>=2.2.1",
"pytz>=2008i", "Babel>=0.9.4", "tw.forms==0.9.2", "docutils>=0.5",
"PyXML>=0.8.4", "cssutils>=0.9.6a0", "Pygments>=1.0",
],
setup_requires=["PasteScript>=1.6.3"],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
test_suite='nose.collector',
package_data={'kai': ['i18n/*/LC_MESSAGES/*.mo']},
message_extractors = {'kai': [
('**.py', 'python', None),
('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
('public/**', 'ignore', None)]},
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points="""
[paste.app_factory]
main = kai.config.middleware:make_app
[paste.app_install]
main = pylons.util:PylonsInstaller
""",
)
|
9b422f71123246632e0e8c505ea9721955a2eada
|
setup.py
|
setup.py
|
"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://packages.python.org/Flask-Script>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://flask-script.readthedocs.org>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
Update documentation link on PyPI to flask-script.readthedocs.org
|
Update documentation link on PyPI to flask-script.readthedocs.org
|
Python
|
bsd-3-clause
|
blakev/flask-script,z4y4ts/flask-script,z4y4ts/flask-script,wjt/flask-script,dext0r/flask-script,xingkaixin/flask-script,denismakogon/flask-script
|
"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://packages.python.org/Flask-Script>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
Update documentation link on PyPI to flask-script.readthedocs.org
|
"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://flask-script.readthedocs.org>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
<commit_before>"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://packages.python.org/Flask-Script>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
<commit_msg>Update documentation link on PyPI to flask-script.readthedocs.org<commit_after>
|
"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://flask-script.readthedocs.org>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://packages.python.org/Flask-Script>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
Update documentation link on PyPI to flask-script.readthedocs.org"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://flask-script.readthedocs.org>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
<commit_before>"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://packages.python.org/Flask-Script>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
<commit_msg>Update documentation link on PyPI to flask-script.readthedocs.org<commit_after>"""
Flask-Script
--------------
Flask support for writing external scripts.
Links
`````
* `documentation <http://flask-script.readthedocs.org>`_
"""
from setuptools import setup
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# https://github.com/pypa/virtualenv/pull/259)
try:
import multiprocessing
except ImportError:
pass
setup(
name='Flask-Script',
version='0.4.0',
url='http://github.com/techniq/flask-script',
license='BSD',
author='Dan Jacob',
author_email='danjac354@gmail.com',
maintainer='Sean Lynch',
maintainer_email='techniq35@gmail.com',
description='Scripting support for Flask',
long_description=__doc__,
py_modules=[
'flask_script'
],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'argparse',
],
tests_require=[
'nose',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
cdb869a872d6c81319f3f072a00c8c99635f4484
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
from os.path import dirname, abspath
HERE = abspath(dirname(__file__))
VERSION = open(HERE + '/puresnmp/version.txt').read().strip()
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open(HERE + "/README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
|
from setuptools import setup, find_packages
VERSION = '1.1.4'
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open("README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
|
Revert "Revert "Another attempt to fix the RTD build.""
|
Revert "Revert "Another attempt to fix the RTD build.""
This reverts commit bb96586af0aa0fcf6ca5b1891740fbc02f3758c8.
|
Python
|
mit
|
exhuma/puresnmp,exhuma/puresnmp
|
from setuptools import setup, find_packages
from os.path import dirname, abspath
HERE = abspath(dirname(__file__))
VERSION = open(HERE + '/puresnmp/version.txt').read().strip()
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open(HERE + "/README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
Revert "Revert "Another attempt to fix the RTD build.""
This reverts commit bb96586af0aa0fcf6ca5b1891740fbc02f3758c8.
|
from setuptools import setup, find_packages
VERSION = '1.1.4'
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open("README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
|
<commit_before>from setuptools import setup, find_packages
from os.path import dirname, abspath
HERE = abspath(dirname(__file__))
VERSION = open(HERE + '/puresnmp/version.txt').read().strip()
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open(HERE + "/README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
<commit_msg>Revert "Revert "Another attempt to fix the RTD build.""
This reverts commit bb96586af0aa0fcf6ca5b1891740fbc02f3758c8.<commit_after>
|
from setuptools import setup, find_packages
VERSION = '1.1.4'
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open("README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
|
from setuptools import setup, find_packages
from os.path import dirname, abspath
HERE = abspath(dirname(__file__))
VERSION = open(HERE + '/puresnmp/version.txt').read().strip()
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open(HERE + "/README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
Revert "Revert "Another attempt to fix the RTD build.""
This reverts commit bb96586af0aa0fcf6ca5b1891740fbc02f3758c8.from setuptools import setup, find_packages
VERSION = '1.1.4'
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open("README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
|
<commit_before>from setuptools import setup, find_packages
from os.path import dirname, abspath
HERE = abspath(dirname(__file__))
VERSION = open(HERE + '/puresnmp/version.txt').read().strip()
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open(HERE + "/README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
<commit_msg>Revert "Revert "Another attempt to fix the RTD build.""
This reverts commit bb96586af0aa0fcf6ca5b1891740fbc02f3758c8.<commit_after>from setuptools import setup, find_packages
VERSION = '1.1.4'
setup(
name="puresnmp",
version=VERSION,
description="Pure Python SNMP implementation",
long_description=open("README.rst").read(),
author="Michel Albert",
author_email="michel@albert.lu",
provides=['puresnmp'],
license="MIT",
include_package_data=True,
install_requires=[
'typing',
],
extras_require={
'dev': [],
'test': ['pytest-xdist', 'pytest', 'pytest-coverage']
},
packages=find_packages(exclude=["tests.*", "tests", "docs"]),
url="https://github.com/exhuma/puresnmp",
keywords="networking snmp",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration',
]
)
|
981284e81044807d0984a0cfd8ac2ed84914b36a
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3'
'appdirs'
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
|
from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3',
'appdirs',
'peewee',
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
|
Add peewee dependency for simpledb.
|
Add peewee dependency for simpledb.
|
Python
|
mit
|
supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer
|
from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3'
'appdirs'
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
Add peewee dependency for simpledb.
|
from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3',
'appdirs',
'peewee',
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
|
<commit_before>from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3'
'appdirs'
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
<commit_msg>Add peewee dependency for simpledb.<commit_after>
|
from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3',
'appdirs',
'peewee',
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
|
from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3'
'appdirs'
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
Add peewee dependency for simpledb.from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3',
'appdirs',
'peewee',
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
|
<commit_before>from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3'
'appdirs'
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
<commit_msg>Add peewee dependency for simpledb.<commit_after>from setuptools import setup, find_packages
setup(
name='weaveserver',
version='0.8',
author='Srivatsan Iyer',
author_email='supersaiyanmode.rox@gmail.com',
packages=find_packages(),
license='MIT',
description='Library to interact with Weave Server',
long_description=open('README.md').read(),
install_requires=[
'weavelib',
'eventlet!=0.22',
'bottle',
'GitPython',
'redis',
'sqlite3',
'appdirs',
'peewee',
],
entry_points={
'console_scripts': [
'weave-launch = app:handle_launch',
'weave-main = app:handle_main'
]
}
)
|
e094679810d9c2bc2ea7913f5422869e75b9f499
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.0.2',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.1.0',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
|
Fix version. Bump minor version, not release version.
|
Fix version. Bump minor version, not release version.
|
Python
|
mit
|
yola/ultradns
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.0.2',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
Fix version. Bump minor version, not release version.
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.1.0',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.0.2',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
<commit_msg>Fix version. Bump minor version, not release version.<commit_after>
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.1.0',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.0.2',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
Fix version. Bump minor version, not release version.#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.1.0',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.0.2',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
<commit_msg>Fix version. Bump minor version, not release version.<commit_after>#!/usr/bin/env python
from setuptools import setup
setup(
name='youltradns',
version='0.1.0',
description='Yola UltraDNS Client',
author='Yola',
author_email='engineers@yola.com',
url='http://github.com/yola/youltradns',
packages=['youltradns'],
install_requires=['requests >= 1.0.0, < 2.0.0'],
)
|
e09ca6ded17b851fdc4329bb86188e10309787c8
|
setup.py
|
setup.py
|
import os
from setuptools import setup
from setuptools.command.build_py import build_py
version = os.environ.get('CI_COMMIT_TAG', None)
class BuildPyCommand(build_py):
def run(self):
if version is None:
raise RuntimeError('CI_COMMIT_TAG must defined as an environment variable to build.')
build_py.run(self)
setup(
cmdclass={
'build_py': BuildPyCommand,
},
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
|
import os
from setuptools import setup
version = os.environ.get('CI_COMMIT_TAG', None)
setup(
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
|
Remove CI_COMMIT_TAG requirement, docs can't be built without it
|
Remove CI_COMMIT_TAG requirement, docs can't be built without it
|
Python
|
mit
|
JensAstrup/pyEchosign
|
import os
from setuptools import setup
from setuptools.command.build_py import build_py
version = os.environ.get('CI_COMMIT_TAG', None)
class BuildPyCommand(build_py):
def run(self):
if version is None:
raise RuntimeError('CI_COMMIT_TAG must defined as an environment variable to build.')
build_py.run(self)
setup(
cmdclass={
'build_py': BuildPyCommand,
},
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
Remove CI_COMMIT_TAG requirement, docs can't be built without it
|
import os
from setuptools import setup
version = os.environ.get('CI_COMMIT_TAG', None)
setup(
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
|
<commit_before>import os
from setuptools import setup
from setuptools.command.build_py import build_py
version = os.environ.get('CI_COMMIT_TAG', None)
class BuildPyCommand(build_py):
def run(self):
if version is None:
raise RuntimeError('CI_COMMIT_TAG must defined as an environment variable to build.')
build_py.run(self)
setup(
cmdclass={
'build_py': BuildPyCommand,
},
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
<commit_msg>Remove CI_COMMIT_TAG requirement, docs can't be built without it<commit_after>
|
import os
from setuptools import setup
version = os.environ.get('CI_COMMIT_TAG', None)
setup(
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
|
import os
from setuptools import setup
from setuptools.command.build_py import build_py
version = os.environ.get('CI_COMMIT_TAG', None)
class BuildPyCommand(build_py):
def run(self):
if version is None:
raise RuntimeError('CI_COMMIT_TAG must defined as an environment variable to build.')
build_py.run(self)
setup(
cmdclass={
'build_py': BuildPyCommand,
},
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
Remove CI_COMMIT_TAG requirement, docs can't be built without itimport os
from setuptools import setup
version = os.environ.get('CI_COMMIT_TAG', None)
setup(
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
|
<commit_before>import os
from setuptools import setup
from setuptools.command.build_py import build_py
version = os.environ.get('CI_COMMIT_TAG', None)
class BuildPyCommand(build_py):
def run(self):
if version is None:
raise RuntimeError('CI_COMMIT_TAG must defined as an environment variable to build.')
build_py.run(self)
setup(
cmdclass={
'build_py': BuildPyCommand,
},
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
<commit_msg>Remove CI_COMMIT_TAG requirement, docs can't be built without it<commit_after>import os
from setuptools import setup
version = os.environ.get('CI_COMMIT_TAG', None)
setup(
name='pyEchosign',
version=version,
packages=['pyEchosign', 'pyEchosign.classes', 'pyEchosign.exceptions', 'pyEchosign.utils'],
url='https://gitlab.com/jensastrup/pyEchosign',
license='MIT',
author='Jens Astrup',
author_email='jensaiden@gmail.com',
description='Connect to the Echosign API without constructing HTTP requests',
long_description=open('README.rst').read(),
install_requires=['requests>=2.12.4, <3.0.0', 'arrow>=0.10.0, <1.0.0'],
tests_require=['coverage', 'nose'],
keywords='adobe echosign',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Office/Business',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
]
)
|
f3ec0a18db0aa9e8b3c9b0e0beaf0bc852578adc
|
setup.py
|
setup.py
|
from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"ipdb",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
Remove ipython from test deps as Travis is broken
|
Remove ipython from test deps as Travis is broken
|
Python
|
mit
|
douglas/toxiproxy-python,douglas/toxiproxy-python
|
from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"ipdb",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
Remove ipython from test deps as Travis is broken
|
from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
<commit_before>from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"ipdb",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
<commit_msg>Remove ipython from test deps as Travis is broken<commit_after>
|
from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"ipdb",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
Remove ipython from test deps as Travis is brokenfrom setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
<commit_before>from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"ipdb",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
<commit_msg>Remove ipython from test deps as Travis is broken<commit_after>from setuptools import setup
NAME = "toxiproxy"
VERSION = "0.1.0"
DESCRIPTION = "Python library for Toxiproxy"
LONG_DESCRIPTION = """\
A Python library for controlling Toxiproxy. Can be used in resiliency testing."""
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author="Douglas Soares de Andrade",
author_email="contato@douglasandrade.com",
url="https://github.com/douglas/toxiproxy-python",
packages=["toxiproxy"],
scripts=[],
license="MIT License",
install_requires=[
"future",
"requests"
],
test_suite="test",
setup_requires=[
"pytest-runner",
"pytest"
],
tests_require=[
"pytest-sugar",
"pytest",
"profilehooks"
],
platforms="Any",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
|
83928f77fb82da01b9521646ffc6b965f70e1a95
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.requirements',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
|
Add cuda.requirements to packages to install
|
Add cuda.requirements to packages to install
|
Python
|
mit
|
niboshi/chainer,benob/chainer,jnishi/chainer,jnishi/chainer,muupan/chainer,woodshop/complex-chainer,chainer/chainer,keisuke-umezawa/chainer,wkentaro/chainer,tscohen/chainer,kikusu/chainer,ikasumi/chainer,jnishi/chainer,ktnyt/chainer,cemoody/chainer,ktnyt/chainer,ysekky/chainer,tkerola/chainer,wkentaro/chainer,nushio3/chainer,pfnet/chainer,kuwa32/chainer,elviswf/chainer,masia02/chainer,bayerj/chainer,okuta/chainer,hvy/chainer,benob/chainer,delta2323/chainer,hvy/chainer,truongdq/chainer,minhpqn/chainer,kashif/chainer,chainer/chainer,sinhrks/chainer,woodshop/chainer,t-abe/chainer,chainer/chainer,umitanuki/chainer,AlpacaDB/chainer,rezoo/chainer,okuta/chainer,anaruse/chainer,kiyukuta/chainer,sou81821/chainer,okuta/chainer,hvy/chainer,AlpacaDB/chainer,chainer/chainer,1986ks/chainer,jnishi/chainer,hidenori-t/chainer,kikusu/chainer,ktnyt/chainer,niboshi/chainer,keisuke-umezawa/chainer,cupy/cupy,nushio3/chainer,keisuke-umezawa/chainer,niboshi/chainer,aonotas/chainer,ttakamura/chainer,ytoyama/yans_chainer_hackathon,muupan/chainer,tereka114/chainer,laysakura/chainer,ttakamura/chainer,jfsantos/chainer,ktnyt/chainer,hvy/chainer,truongdq/chainer,sinhrks/chainer,ronekko/chainer,okuta/chainer,keisuke-umezawa/chainer,Kaisuke5/chainer,tigerneil/chainer,niboshi/chainer,cupy/cupy,cupy/cupy,wkentaro/chainer,yanweifu/chainer,t-abe/chainer,wavelets/chainer,wkentaro/chainer,cupy/cupy
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
Add cuda.requirements to packages to install
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.requirements',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
|
<commit_before>#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
<commit_msg>Add cuda.requirements to packages to install<commit_after>
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.requirements',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
Add cuda.requirements to packages to install#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.requirements',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
|
<commit_before>#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
<commit_msg>Add cuda.requirements to packages to install<commit_after>#!/usr/bin/env python
from distutils.core import setup
setup(
name='chainer',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=['chainer',
'chainer.cudnn',
'chainer.functions',
'chainer.optimizers',
'chainer.requirements',
'chainer.utils'],
install_requires=['numpy'],
scripts=['scripts/chainer-cuda-requirements'],
)
|
81ae197f05e81daddc292989ef33c0e9610a14d0
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
|
from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
|
Add Python 3 trove classifiers
|
Add Python 3 trove classifiers
Fixes #46.
|
Python
|
bsd-3-clause
|
praekelt/django-recaptcha,praekelt/django-recaptcha
|
from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
Add Python 3 trove classifiers
Fixes #46.
|
from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
|
<commit_before>from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
<commit_msg>Add Python 3 trove classifiers
Fixes #46.<commit_after>
|
from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
|
from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
Add Python 3 trove classifiers
Fixes #46.from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
|
<commit_before>from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
<commit_msg>Add Python 3 trove classifiers
Fixes #46.<commit_after>from setuptools import setup, find_packages
long_desc = open('README.rst', 'rb').read().decode('utf-8') + \
open('AUTHORS.rst', 'rb').read().decode('utf-8') + \
open('CHANGELOG.rst', 'rb').read().decode('utf-8')
setup(
name='django-recaptcha',
version='1.0.5',
description='Django recaptcha form field/widget app.',
long_description=long_desc,
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
url='http://github.com/praekelt/django-recaptcha',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-setuptest>=0.2.1',
],
test_suite="setuptest.setuptest.SetupTestSuite",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
"License :: OSI Approved :: BSD License",
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
zip_safe=False,
)
|
436508cbad637c42346130bda2745907dba4b0a2
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
|
#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 3'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
|
Add Python3 classfier fo CheeseShop.
|
Add Python3 classfier fo CheeseShop.
|
Python
|
mit
|
kaste/mockito-python,lwoydziak/mockito-python,zhilts/pymockito,zhilts/pymockito
|
#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
Add Python3 classfier fo CheeseShop.
|
#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 3'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
|
<commit_before>#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
<commit_msg>Add Python3 classfier fo CheeseShop.<commit_after>
|
#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 3'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
|
#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
Add Python3 classfier fo CheeseShop.#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 3'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
|
<commit_before>#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
<commit_msg>Add Python3 classfier fo CheeseShop.<commit_after>#!/usr/bin/env python
# coding: utf-8
from distribute_setup import use_setuptools
use_setuptools()
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='mockito',
version='0.3.0',
packages=['mockito', 'mockito_test', 'mockito_util'],
url='http://code.google.com/p/mockito/wiki/MockitoForPython',
download_url='http://bitbucket.org/szczepiq/mockito-python/downloads/',
maintainer='mockito maintainers',
maintainer_email='mockito-python@googlegroups.com',
license='MIT',
description='Spying framework',
long_description='Mockito is a spying framework based on Java library with the same name.',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 3'
],
test_loader = 'mockito_util.test:TestLoader',
test_suite = 'mockito_test',
**extra
)
|
2f7ce78c54e196168b8bab192a07248d578d0b8d
|
flask_app/config/autoclave.py
|
flask_app/config/autoclave.py
|
import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
|
import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
|
Fix misconfiguration in static root path computation
|
Fix misconfiguration in static root path computation
|
Python
|
bsd-3-clause
|
vmalloc/mailboxer,vmalloc/logpile,vmalloc/logpile,getslash/mailboxer,vmalloc/logpile,getslash/mailboxer,getslash/mailboxer,vmalloc/mailboxer,vmalloc/mailboxer
|
import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
Fix misconfiguration in static root path computation
|
import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
|
<commit_before>import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
<commit_msg>Fix misconfiguration in static root path computation<commit_after>
|
import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
|
import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
Fix misconfiguration in static root path computationimport os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
|
<commit_before>import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
<commit_msg>Fix misconfiguration in static root path computation<commit_after>import os
AUTOCLAVE_APP_NAME = "autoclave"
AUTOCLAVE_USER_NAME = AUTOCLAVE_APP_NAME
AUTOCLAVE_DEPLOY_ROOT = os.path.join("/opt", AUTOCLAVE_APP_NAME)
AUTOCLAVE_DEPLOY_SRC_ROOT = os.path.join(AUTOCLAVE_DEPLOY_ROOT, "src")
AUTOCLAVE_API_ROOT = "/api"
AUTOCLAVE_CELERY_WORKER_SERVICE_NAME = AUTOCLAVE_APP_NAME + "-celery"
AUTOCLAVE_DATA_ROOT = "/data"
AUTOCLAVE_REDIS_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "redis")
AUTOCLAVE_MONGO_DB_PATH = os.path.join(AUTOCLAVE_DATA_ROOT, "mongo")
AUTOCLAVE_APP_TCP_PORT = 5353
AUTOCLAVE_DATABASE_HOST = "127.0.0.1"
AUTOCLAVE_DEPLOYMENT_FRONTEND_TCP_PORT = 80
AUTOCLAVE_STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "static"))
AUTOCLAVE_TESTING_FRONTEND_TCP_PORT = 8080
|
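The one-line fix above simply walks one directory further up before appending "static"; a small sketch of how the expression resolves, with an invented module path used only to make the arithmetic concrete:

import os

# Invented location for the config module, one level deeper than the project source root.
module_file = "/opt/autoclave/src/autoclave/config/__init__.py"

one_up = os.path.abspath(os.path.join(os.path.dirname(module_file), "..", "static"))
two_up = os.path.abspath(os.path.join(os.path.dirname(module_file), "..", "..", "static"))

print(one_up)  # /opt/autoclave/src/autoclave/static
print(two_up)  # /opt/autoclave/src/static
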
e19dd09b0243c2edd5c3d2d18399f5a11b9c892b
|
app/__init__.py
|
app/__init__.py
|
import mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
|
import logging, mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
# Create an instance of a logger
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
|
Add a logging instance to the package
|
Add a logging instance to the package
|
Python
|
mit
|
Zillolo/lazy-todo
|
import mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
Add a logging instance to the package
|
import logging, mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
# Create an instance of a logger
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
|
<commit_before>import mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
<commit_msg>Add a logging instance to the package<commit_after>
|
import logging, mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
# Create an instance of a logger
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
|
import mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
Add a logging instance to the packageimport logging, mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
# Create an instance of a logger
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
|
<commit_before>import mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
<commit_msg>Add a logging instance to the package<commit_after>import logging, mongoengine
# Open a connection to the MongoDB database, to be shared throughout the
# application.
db = mongoengine.connect('lazy', host='127.0.0.1', port=27017)
# Create an instance of a logger
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
|
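A short usage sketch for the module-level logger introduced above; the log messages and values are illustrative only:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# basicConfig() configures the root logger, so every module that calls
# logging.getLogger(__name__) inherits the DEBUG threshold unless it overrides it.
logger.debug("connected to MongoDB at %s:%d", "127.0.0.1", 27017)
logger.warning("running with DEBUG logging enabled")
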
6e540d7125e76c2d4d7d06662ab283a5d698c86b
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver/tests/tests.py'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
|
Add correct path to tests
|
Add correct path to tests
|
Python
|
mit
|
tcmoore3/linear_solver
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
Add correct path to tests
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver/tests/tests.py'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
|
<commit_before>from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
<commit_msg>Add correct path to tests<commit_after>
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver/tests/tests.py'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
Add correct path to testsfrom setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver/tests/tests.py'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
|
<commit_before>from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
<commit_msg>Add correct path to tests<commit_after>from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
requirements = [line.strip() for line in open('requirements.txt').readlines()]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(['linear_solver/tests/tests.py'])
sys.exit(errcode)
setup(name='linear_solver',
version='0.0',
description='',
url='http://github.com/tcmoore3/linear_solver',
author='Timothy C. Moore',
author_email='timothy.c.moore@vanderbilt.edu',
license='MIT',
packages=find_packages(),
package_data={'linear_solver.testing': ['reference/*']},
install_requires=requirements,
zip_safe=False,
test_suite='linear_solver',
cmdclass={'test': PyTest},
extras_require={'utils': ['pytest']},
)
|
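The change above hands pytest an explicit test file rather than a package directory; the same pattern outside setuptools looks roughly like this (the extra -q flag is an assumption, not part of the commit):

import sys
import pytest

# pytest.main() returns an exit status (0 when every test passes); forwarding it
# to sys.exit() is what lets `python setup.py test` fail the build on test errors.
errcode = pytest.main(["linear_solver/tests/tests.py", "-q"])  # -q added here for brevity
sys.exit(errcode)
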
e848724c65f5ce2434d866543ba9587ac223d56e
|
premis_event_service/forms.py
|
premis_event_service/forms.py
|
from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
)
)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
)
)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
)
|
from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
required=False)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
required=False)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
),
required=False)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
),
required=False)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
required=False,
)
|
Make all search form fields optional.
|
Make all search form fields optional.
|
Python
|
bsd-3-clause
|
unt-libraries/django-premis-event-service,unt-libraries/django-premis-event-service,unt-libraries/django-premis-event-service
|
from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
)
)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
)
)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
)
Make all search form fields optional.
|
from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
required=False)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
required=False)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
),
required=False)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
),
required=False)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
required=False,
)
|
<commit_before>from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
)
)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
)
)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
)
<commit_msg>Make all search form fields optional.<commit_after>
|
from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
required=False)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
required=False)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
),
required=False)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
),
required=False)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
required=False,
)
|
from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
)
)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
)
)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
)
Make all search form fields optional.from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
required=False)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
required=False)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
),
required=False)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
),
required=False)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
required=False,
)
|
<commit_before>from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
)
)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
)
)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
)
<commit_msg>Make all search form fields optional.<commit_after>from django import forms
import settings
OUTCOME_CHOICES = settings.EVENT_OUTCOME_CHOICES
EVENT_TYPE_CHOICES = settings.EVENT_TYPE_CHOICES
class EventSearchForm(forms.Form):
outcome = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-small',
}
),
choices=OUTCOME_CHOICES,
required=False)
event_type = forms.ChoiceField(
widget=forms.Select(
attrs={
'id': 'prependedInput',
'class': 'input-medium',
}
),
choices=EVENT_TYPE_CHOICES,
required=False)
start_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'startdatepicker',
'placeholder': 'Start Date',
'class': 'input-small',
}
),
required=False)
end_date = forms.DateField(
widget=forms.DateInput(
attrs={
'id': 'enddatepicker',
'placeholder': 'End Date',
'class': 'input-small',
}
),
required=False)
linked_object_id = forms.CharField(
widget=forms.TextInput(
attrs={
'placeholder': 'Linked Object ID',
'class': 'input-medium',
}
),
max_length=20,
required=False,
)
|
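What required=False buys in practice: a form bound to no data at all still validates, and optional fields come back as empty values. A self-contained sketch with an invented two-field form rather than the project's real settings:

import django
from django.conf import settings

settings.configure(USE_I18N=False)  # minimal standalone setup, just for this sketch
django.setup()

from django import forms

class SearchForm(forms.Form):
    # Invented subset of the real form's fields.
    outcome = forms.ChoiceField(choices=[("ok", "ok"), ("fail", "fail")], required=False)
    start_date = forms.DateField(required=False)

form = SearchForm(data={})   # user submitted no filters at all
print(form.is_valid())       # True: every field is optional
print(form.cleaned_data)     # {'outcome': '', 'start_date': None}
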
fff41b90b96f0a12a69afcf342812bdf24538e64
|
setup.py
|
setup.py
|
#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
|
#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud",
"paramiko"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
|
Add paramiko to install_requires since libcloud deploy_node() requires it.
|
Add paramiko to install_requires since libcloud deploy_node() requires it.
|
Python
|
agpl-3.0
|
fedora-infra/fedimg,fedora-infra/fedimg
|
#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
Add paramiko to install_requires since libcloud deploy_node() requires it.
|
#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud",
"paramiko"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
|
<commit_before>#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
<commit_msg>Add paramiko to install_requires since libcloud deploy_node() requires it.<commit_after>
|
#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud",
"paramiko"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
|
#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
Add paramiko to install_requires since libcloud deploy_node() requires it.#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud",
"paramiko"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
|
<commit_before>#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
<commit_msg>Add paramiko to install_requires since libcloud deploy_node() requires it.<commit_after>#!/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='fedimg',
version='0.0.1',
description='Service to automatically upload built Fedora images \
to internal and external cloud providers.',
classifiers=[
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License \
v3 or later (AGPLv3+)",
],
keywords='python Fedora cloud image uploader',
author='David Gay',
author_email='oddshocks@riseup.net',
url='https://github.com/oddshocks/fedimg',
license='AGPLv3+',
include_package_data=True,
zip_safe=False,
install_requires=["fedmsg",
"apache-libcloud",
"paramiko"],
packages=[],
entry_points="""
[moksha.consumer]
kojiconsumer = fedimg.consumers:KojiConsumer
""",
)
|
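libcloud's deploy_node() drives the freshly created machine over SSH, and libcloud's SSH client is built on paramiko, which is why the dependency is declared above. A minimal runtime check mirroring that requirement; the error text is invented, not taken from fedimg:

# Fail fast if the indirect dependency is missing.
try:
    import paramiko  # noqa: F401  (used indirectly by libcloud's SSH machinery)
except ImportError as exc:
    raise SystemExit("paramiko is required for libcloud deploy_node(): %s" % exc)
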
231edf9e0e6eaf6a0fb82f25173164da53b206b8
|
setup.py
|
setup.py
|
#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
|
#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
package_data={'hxl_proxy': ['*.sql']},
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
|
Include SQL files in the distribution.
|
Include SQL files in the distribution.
|
Python
|
unlicense
|
HXLStandard/hxl-proxy,HXLStandard/hxl-proxy,HXLStandard/hxl-proxy,HXLStandard/hxl-proxy
|
#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
Include SQL files in the distribution.
|
#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
package_data={'hxl_proxy': ['*.sql']},
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
|
<commit_before>#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
<commit_msg>Include SQL files in the distribution.<commit_after>
|
#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
package_data={'hxl_proxy': ['*.sql']},
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
|
#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
Include SQL files in the distribution.#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
package_data={'hxl_proxy': ['*.sql']},
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
|
<commit_before>#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
<commit_msg>Include SQL files in the distribution.<commit_after>#!/usr/bin/python
"""Install, build, or test the HXL Proxy.
For details, try
python setup.py -h
"""
import sys, setuptools
if sys.version_info.major != 3:
raise SystemExit("The HXL Proxy requires Python 3.x")
setuptools.setup(
name = 'hxl-proxy',
packages = ['hxl_proxy'],
package_data={'hxl_proxy': ['*.sql']},
version = '1.6',
description = 'Flask-based web proxy for HXL',
author='David Megginson',
author_email='contact@megginson.com',
url='https://github.com/HXLStandard/hxl-proxy',
include_package_data = True,
zip_safe = False,
install_requires=['flask-cache>=0.13', 'libhxl>=4.6', 'ckanapi>=3.5', 'flask>=0.10', 'requests_cache', 'mysql-connector-python'],
test_suite = "tests",
tests_require = ['mock']
)
|
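With the *.sql files shipped inside the package as above, they can be loaded without knowing where the package was installed; a sketch using only the standard library, where the file name schema.sql is an assumption:

import pkgutil

# get_data() returns the bytes of a file bundled inside an installed package,
# or None if it cannot be found; "schema.sql" is a placeholder name.
sql_bytes = pkgutil.get_data("hxl_proxy", "schema.sql")
if sql_bytes is not None:
    sql_text = sql_bytes.decode("utf-8")
    print("loaded %d characters of SQL" % len(sql_text))
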
0de340d41e44bb1057ead9f8d61b47f32732eabb
|
start.py
|
start.py
|
import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", tags, releases
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
|
import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", [t.version for t in tags], [r.version for r in releases]
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
|
Fix version printing of tags and releases
|
Fix version printing of tags and releases
|
Python
|
mit
|
ayushgoel/LongShot
|
import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", tags, releases
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
Fix version printing of tags and releases
|
import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", [t.version for t in tags], [r.version for r in releases]
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
|
<commit_before>import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", tags, releases
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
<commit_msg>Fix version printing of tags and releases<commit_after>
|
import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", [t.version for t in tags], [r.version for r in releases]
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
|
import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", tags, releases
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
Fix version printing of tags and releasesimport github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", [t.version for t in tags], [r.version for r in releases]
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
|
<commit_before>import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", tags, releases
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
<commit_msg>Fix version printing of tags and releases<commit_after>import github
import github_token
import repositories
import tagsparser
import flock
def main():
g = github.Github(github_token.GITHUB_TOKEN)
for repo in repositories.REPOSITORIES:
tags, releases = g.get_tags_and_releases(repo["owner"], repo["name"], repositories.COUNT)
print "Got", [t.version for t in tags], [r.version for r in releases]
unreleased_tags = tagsparser.find_unreleased_tags(tags, releases)
flockML = flock.create_flockML_for_tags(repo["owner"], repo["name"], unreleased_tags)
if flockML:
flock.notify_group_about_missing_release_notes(flockML)
else:
print "No message sending required for {0}".format(repo)
if __name__ == '__main__':
main()
|
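The fix prints each object's version attribute instead of the objects themselves; a tiny self-contained illustration with stand-in Tag and Release types (the real github.py objects differ):

from collections import namedtuple

# Stand-ins for whatever the project's github module returns; the only thing
# that matters for the fix is that each object carries a .version attribute.
Tag = namedtuple("Tag", ["version"])
Release = namedtuple("Release", ["version"])

tags = [Tag("v1.2.0"), Tag("v1.2.1")]
releases = [Release("v1.2.0")]

print("Got", [t.version for t in tags], [r.version for r in releases])
# -> Got ['v1.2.0', 'v1.2.1'] ['v1.2.0']
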
c224fdecf174077f3b7a15f056e65b10282fed38
|
tasks.py
|
tasks.py
|
from invoke import Collection
from invocations import docs, testing
# TODO: let from_module specify new name
api = Collection.from_module(docs)
# TODO: maybe allow rolling configuration into it too heh
api.configure({
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
api.name = 'docs'
main = Collection.from_module(docs)
main.name = 'main'
main.configure({
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
|
from invoke import Collection
from invocations import docs, testing
# Usage doc/API site
api = Collection.from_module(docs, name='docs', config={
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
# Main/about/changelog site
main = Collection.from_module(docs, name='main', config={
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
|
Use new behavior from newer Invoke
|
Use new behavior from newer Invoke
|
Python
|
lgpl-2.1
|
mirrorcoder/paramiko,thisch/paramiko,paramiko/paramiko,remram44/paramiko,zpzgone/paramiko,dlitz/paramiko,Automatic/paramiko,varunarya10/paramiko,ameily/paramiko,mhdaimi/paramiko,redixin/paramiko,rcorrieri/paramiko,thusoy/paramiko,torkil/paramiko,SebastianDeiss/paramiko,zarr12steven/paramiko,dorianpula/paramiko,toby82/paramiko,esc/paramiko,digitalquacks/paramiko,jorik041/paramiko,CptLemming/paramiko,davidbistolas/paramiko,selboo/paramiko,reaperhulk/paramiko,jaraco/paramiko,anadigi/paramiko,fvicente/paramiko
|
from invoke import Collection
from invocations import docs, testing
# TODO: let from_module specify new name
api = Collection.from_module(docs)
# TODO: maybe allow rolling configuration into it too heh
api.configure({
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
api.name = 'docs'
main = Collection.from_module(docs)
main.name = 'main'
main.configure({
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
Use new behavior from newer Invoke
|
from invoke import Collection
from invocations import docs, testing
# Usage doc/API site
api = Collection.from_module(docs, name='docs', config={
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
# Main/about/changelog site
main = Collection.from_module(docs, name='main', config={
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
|
<commit_before>from invoke import Collection
from invocations import docs, testing
# TODO: let from_module specify new name
api = Collection.from_module(docs)
# TODO: maybe allow rolling configuration into it too heh
api.configure({
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
api.name = 'docs'
main = Collection.from_module(docs)
main.name = 'main'
main.configure({
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
<commit_msg>Use new behavior from newer Invoke<commit_after>
|
from invoke import Collection
from invocations import docs, testing
# Usage doc/API site
api = Collection.from_module(docs, name='docs', config={
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
# Main/about/changelog site
main = Collection.from_module(docs, name='main', config={
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
|
from invoke import Collection
from invocations import docs, testing
# TODO: let from_module specify new name
api = Collection.from_module(docs)
# TODO: maybe allow rolling configuration into it too heh
api.configure({
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
api.name = 'docs'
main = Collection.from_module(docs)
main.name = 'main'
main.configure({
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
Use new behavior from newer Invokefrom invoke import Collection
from invocations import docs, testing
# Usage doc/API site
api = Collection.from_module(docs, name='docs', config={
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
# Main/about/changelog site
main = Collection.from_module(docs, name='main', config={
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
|
<commit_before>from invoke import Collection
from invocations import docs, testing
# TODO: let from_module specify new name
api = Collection.from_module(docs)
# TODO: maybe allow rolling configuration into it too heh
api.configure({
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
api.name = 'docs'
main = Collection.from_module(docs)
main.name = 'main'
main.configure({
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
<commit_msg>Use new behavior from newer Invoke<commit_after>from invoke import Collection
from invocations import docs, testing
# Usage doc/API site
api = Collection.from_module(docs, name='docs', config={
'sphinx.source': 'sites/docs',
'sphinx.target': 'sites/docs/_build',
})
# Main/about/changelog site
main = Collection.from_module(docs, name='main', config={
'sphinx.source': 'sites/main',
'sphinx.target': 'sites/main/_build',
})
ns = Collection(testing.test, docs=api, main=main)
|
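Newer Invoke folds the rename and the configuration into Collection.from_module() itself, as the diff shows; a rough self-contained sketch of the same call shape, using an invented in-memory task module in place of invocations.docs:

import types
from invoke import Collection, task

# In-memory stand-in for a real task module such as invocations.docs.
@task
def clean(c):
    print("hypothetical docs task")

mytasks = types.ModuleType("mytasks")
mytasks.clean = clean

# name= and config= replace the older from_module() + .configure() + .name= sequence.
docs = Collection.from_module(
    mytasks,
    name="docs",
    config={"sphinx.source": "sites/docs", "sphinx.target": "sites/docs/_build"},
)
ns = Collection(docs=docs)
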
de17439cf237d073236fffd0130c883683f1ba28
|
tokens/models.py
|
tokens/models.py
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)
@property
def class_name(self):
return ''.join(
map(lambda s: s.title(), self.public_name.split())
)
|
Add class_name property to Token model
|
Add class_name property to Token model
|
Python
|
apache-2.0
|
onyb/ethane,onyb/ethane,onyb/ethane,onyb/ethane
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)Add class_name property to Token model
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)
@property
def class_name(self):
return ''.join(
map(lambda s: s.title(), self.public_name.split())
)
|
<commit_before>from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)<commit_msg>Add class_name property to Token model<commit_after>
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)
@property
def class_name(self):
return ''.join(
map(lambda s: s.title(), self.public_name.split())
)
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)Add class_name property to Token modelfrom django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)
@property
def class_name(self):
return ''.join(
map(lambda s: s.title(), self.public_name.split())
)
|
<commit_before>from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)<commit_msg>Add class_name property to Token model<commit_after>from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .conf import PHASES, TOKEN_TYPES
class Token(models.Model):
public_name = models.CharField(max_length=200)
symbol = models.CharField(max_length=4)
decimals = models.IntegerField(
default=18,
validators=[MaxValueValidator(20), MinValueValidator(0)]
)
phase = models.CharField(
max_length=8,
choices=PHASES,
default=PHASES[0][0],
)
token_type = models.CharField(
max_length=12,
choices=TOKEN_TYPES,
default=TOKEN_TYPES[0][0],
)
@property
def class_name(self):
return ''.join(
map(lambda s: s.title(), self.public_name.split())
)
|
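Illustrative note (outside the record above): the `class_name` property introduced by this commit title-cases each whitespace-separated word of `public_name` and concatenates the results. A standalone sketch of the same transformation, independent of Django, with made-up input values:

def class_name(public_name):
    # Mirrors the property body: title-case each word, then drop the spaces.
    return ''.join(s.title() for s in public_name.split())

assert class_name("my shiny token") == "MyShinyToken"
assert class_name("DAI") == "Dai"   # str.title() also lowercases trailing capitals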
937e257d609c51ad81675168a38792b6083d0e2f
|
graphene/types/union.py
|
graphene/types/union.py
|
import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("An Union cannot be intitialized")
|
import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("A Union cannot be intitialized")
|
Fix typo in Union initialization exception
|
Fix typo in Union initialization exception
|
Python
|
mit
|
graphql-python/graphene,graphql-python/graphene,sjhewitt/graphene,Globegitter/graphene,sjhewitt/graphene,Globegitter/graphene
|
import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("An Union cannot be intitialized")
Fix typo in Union initialization exception
|
import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("A Union cannot be intitialized")
|
<commit_before>import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("An Union cannot be intitialized")
<commit_msg>Fix typo in Union initialization exception<commit_after>
|
import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("A Union cannot be intitialized")
|
import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("An Union cannot be intitialized")
Fix typo in Union initialization exceptionimport six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("A Union cannot be intitialized")
|
<commit_before>import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("An Union cannot be intitialized")
<commit_msg>Fix typo in Union initialization exception<commit_after>import six
from ..utils.is_base_type import is_base_type
from .options import Options
class UnionMeta(type):
def __new__(cls, name, bases, attrs):
# Also ensure initialization is only performed for subclasses of
# Union
if not is_base_type(bases, UnionMeta):
return type.__new__(cls, name, bases, attrs)
options = Options(
attrs.pop('Meta', None),
name=name,
description=attrs.get('__doc__'),
types=(),
)
assert (
isinstance(options.types, (list, tuple)) and
len(options.types) > 0
), 'Must provide types for Union {}.'.format(options.name)
return type.__new__(cls, name, bases, dict(attrs, _meta=options))
def __str__(cls): # noqa: N805
return cls._meta.name
class Union(six.with_metaclass(UnionMeta)):
'''
Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type
is used to describe what types are possible as well as providing a function
to determine which type is actually used when the field is resolved.
'''
resolve_type = None
def __init__(self, *args, **kwargs):
raise Exception("A Union cannot be intitialized")
|
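Illustrative note (outside the record above): the `is_base_type(bases, UnionMeta)` guard in this record is the common metaclass idiom of running class configuration only for subclasses, not for the base class that first carries the metaclass. A minimal, library-free sketch of that guard, using hypothetical names rather than graphene's API:

class ConfiguringMeta(type):
    def __new__(mcs, name, bases, attrs):
        # No base uses this metaclass yet -> this is the base class itself; skip configuration.
        if not any(isinstance(base, ConfiguringMeta) for base in bases):
            return super().__new__(mcs, name, bases, attrs)
        attrs['_configured'] = True
        return super().__new__(mcs, name, bases, attrs)

class Base(metaclass=ConfiguringMeta):
    pass

class Child(Base):
    pass

assert '_configured' not in Base.__dict__
assert Child.__dict__['_configured'] is True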
22186f8ac1033f9e98add968fc983c7281aaf4b5
|
scrapy-webscanner/scanner/rules/regexrule.py
|
scrapy-webscanner/scanner/rules/regexrule.py
|
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
|
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string, regex.DOTALL)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
|
Use DOTALL flag on regex.
|
Use DOTALL flag on regex.
|
Python
|
mpl-2.0
|
os2webscanner/os2webscanner,os2webscanner/os2webscanner,os2webscanner/os2webscanner,os2webscanner/os2webscanner
|
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
Use DOTALL flag on regex.
|
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string, regex.DOTALL)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
|
<commit_before># The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
<commit_msg>Use DOTALL flag on regex.<commit_after>
|
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string, regex.DOTALL)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
|
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
Use DOTALL flag on regex.# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string, regex.DOTALL)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
|
<commit_before># The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
<commit_msg>Use DOTALL flag on regex.<commit_after># The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import regex
from rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
"""Represents a rule which matches using a regular expression."""
def __init__(self, name, match_string, sensitivity):
"""Initialize the rule.
The sensitivity is used to assign a sensitivity value to matches.
"""
self.name = name
self.regex = regex.compile(match_string, regex.DOTALL)
self.sensitivity = sensitivity
def execute(self, text):
"""Execute the rule on the text."""
matches = set()
re_matches = self.regex.finditer(text)
for match in re_matches:
matches.add(MatchItem(matched_data=match.group(0),
sensitivity=self.sensitivity))
return matches
|
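Illustrative note (outside the record above): the only change in this record is compiling the pattern with `regex.DOTALL`, so that `.` also matches newline characters and a rule can match across line breaks in scraped text. The stdlib `re` flag behaves the same way for this purpose; a quick sketch under that assumption, with a made-up pattern and input:

import re

text = "id: 123456-\n7890"
pattern = r"\d{6}-.\d{4}"

# Without DOTALL, '.' will not match the newline between the digit groups.
assert re.search(pattern, text) is None
# With DOTALL, the same pattern matches across the line break.
assert re.search(pattern, text, re.DOTALL) is not None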
430b3c1267e79ccfa79d120f0c89112fea7c5cc3
|
LiSE/setup.py
|
LiSE/setup.py
|
# This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a6",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
|
# This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a7",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
|
Correct the version number in LiSE too
|
Correct the version number in LiSE too
|
Python
|
agpl-3.0
|
LogicalDash/LiSE,LogicalDash/LiSE
|
# This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a6",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
Correct the version number in LiSE too
|
# This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a7",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
|
<commit_before># This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a6",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
<commit_msg>Correct the version number in LiSE too<commit_after>
|
# This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a7",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
|
# This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a6",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
Correct the version number in LiSE too# This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a7",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
|
<commit_before># This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a6",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
<commit_msg>Correct the version number in LiSE too<commit_after># This file is part of LiSE, a framework for life simulation games.
# Copyright (c) Zachary Spector, zacharyspector@gmail.com
import sys
if sys.version_info[0] < 3 or (
sys.version_info[0] == 3 and
sys.version_info[1] < 3
):
raise RuntimeError("LiSE requires Python 3.3 or later")
from setuptools import setup
setup(
name="LiSE",
version="0.0.0a7",
description="Rules engine for life simulation games",
author="Zachary Spector",
author_email="zacharyspector@gmail.com",
license="GPL3",
keywords="game simulation",
url="https://github.com/LogicalDash/LiSE",
packages=[
"LiSE",
"LiSE.server",
"LiSE.examples"
],
package_data={
'LiSE': ['sqlite.json']
},
install_requires=[
"allegedb>=0.10.0",
],
)
|
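Illustrative note (outside the record above): this record only bumps the version string from 0.0.0a6 to 0.0.0a7. As an aside, the manual `sys.version_info` check kept at the top of both versions can also be expressed declaratively through setuptools metadata; a hedged sketch with a hypothetical package name, not how LiSE itself is configured:

from setuptools import setup

setup(
    name="example-package",        # hypothetical
    version="0.0.0a7",
    python_requires=">=3.3",       # pip/setuptools enforce this at install time
    packages=["example_package"],
)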
e526ebe84159bde0be325ec561cc728ab7c0daee
|
src/zeit/edit/testing.py
|
src/zeit/edit/testing.py
|
# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
|
# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
skin = 'vivi'
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
|
Use vivi-Layer since the editor resides on that
|
Use vivi-Layer since the editor resides on that
|
Python
|
bsd-3-clause
|
ZeitOnline/zeit.edit,ZeitOnline/zeit.edit,ZeitOnline/zeit.edit
|
# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
Use vivi-Layer since the editor resides on that
|
# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
skin = 'vivi'
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
|
<commit_before># Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
<commit_msg>Use vivi-Layer since the editor resides on that<commit_after>
|
# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
skin = 'vivi'
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
|
# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
Use vivi-Layer since the editor resides on that# Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
skin = 'vivi'
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
|
<commit_before># Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
<commit_msg>Use vivi-Layer since the editor resides on that<commit_after># Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import gocept.lxml.interfaces
import gocept.selenium.ztk
import grokcore.component as grok
import zeit.cms.testing
import zeit.edit.container
import zeit.edit.interfaces
ZCML_LAYER = zeit.cms.testing.ZCMLLayer('ftesting.zcml')
SELENIUM_LAYER = gocept.selenium.ztk.Layer(ZCML_LAYER)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class SeleniumTestCase(zeit.cms.testing.SeleniumTestCase):
layer = SELENIUM_LAYER
skin = 'vivi'
class IContainer(zeit.edit.interfaces.IArea,
zeit.edit.interfaces.IBlock):
pass
class IBlock(zeit.edit.interfaces.IBlock):
pass
class Container(zeit.edit.container.TypeOnAttributeContainer,
grok.MultiAdapter):
grok.implements(IContainer)
grok.provides(IContainer)
grok.adapts(
IContainer,
gocept.lxml.interfaces.IObjectified)
grok.name('container')
zeit.edit.block.register_element_factory(IContainer, 'container', 'Container')
class Block(zeit.edit.block.SimpleElement):
area = IContainer
grok.implements(IBlock)
type = 'block'
zeit.edit.block.register_element_factory(IContainer, 'block', 'Block')
|
13ab83d88739baccbff204d20f9782e0db447cdc
|
voteswap/urls.py
|
voteswap/urls.py
|
"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
]
|
"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
url('^logout/$', 'django.contrib.auth.views.logout', name='logout'),
]
|
Add logout url, but it redirects to /admin
|
Add logout url, but it redirects to /admin
|
Python
|
mit
|
sbuss/voteswap,sbuss/voteswap,sbuss/voteswap,sbuss/voteswap
|
"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
]
Add logout url, but it redirects to /admin
|
"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
url('^logout/$', 'django.contrib.auth.views.logout', name='logout'),
]
|
<commit_before>"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
]
<commit_msg>Add logout url, but it redirects to /admin<commit_after>
|
"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
url('^logout/$', 'django.contrib.auth.views.logout', name='logout'),
]
|
"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
]
Add logout url, but it redirects to /admin"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
url('^logout/$', 'django.contrib.auth.views.logout', name='logout'),
]
|
<commit_before>"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
]
<commit_msg>Add logout url, but it redirects to /admin<commit_after>"""voteswap URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from voteswap.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('social.apps.django_app.urls', namespace='social')),
url('^$', index, name='index'),
url('^logout/$', 'django.contrib.auth.views.logout', name='logout'),
]
|
12d927a8fa7d5cf45276fe8e1049cc6cd355cc03
|
client/ai.py
|
client/ai.py
|
import random
# define possible actions
go_north = 'go north'
go_south = 'go south'
go_east = 'go east'
go_west = 'go west'
rotate_cw = 'rotate cw'
rotate_ccw = 'rotate ccw'
wait = 'wait'
def make_move(state):
""" Given a game state, decide on a move. """
print('AI making move for state: {}'.format(state))
return random.choice([go_north, go_south, go_east, go_west, rotate_cw, rotate_ccw])
|
import random
# Move definitions
go_north = 'go north'
go_south = 'go south'
go_east = 'go east'
go_west = 'go west'
rotate_cw = 'rotate cw'
rotate_ccw = 'rotate ccw'
wait = 'wait'
shoot = 'shoot'
def make_move(state):
""" Given a game state, decide on a move. """
# TODO: Implement AI!
return random.choice([
go_north,
go_south,
go_east,
go_west,
rotate_cw,
rotate_ccw,
shoot,
])
|
Add shoot to random AI
|
Add shoot to random AI
|
Python
|
mit
|
supermitch/mech-ai,supermitch/mech-ai,supermitch/mech-ai
|
import random
# define possible actions
go_north = 'go north'
go_south = 'go south'
go_east = 'go east'
go_west = 'go west'
rotate_cw = 'rotate cw'
rotate_ccw = 'rotate ccw'
wait = 'wait'
def make_move(state):
""" Given a game state, decide on a move. """
print('AI making move for state: {}'.format(state))
return random.choice([go_north, go_south, go_east, go_west, rotate_cw, rotate_ccw])
Add shoot to random AI
|
import random
# Move definitions
go_north = 'go north'
go_south = 'go south'
go_east = 'go east'
go_west = 'go west'
rotate_cw = 'rotate cw'
rotate_ccw = 'rotate ccw'
wait = 'wait'
shoot = 'shoot'
def make_move(state):
""" Given a game state, decide on a move. """
# TODO: Implement AI!
return random.choice([
go_north,
go_south,
go_east,
go_west,
rotate_cw,
rotate_ccw,
shoot,
])
|
<commit_before>import random
# define possible actions
go_north = 'go north'
go_south = 'go south'
go_east = 'go east'
go_west = 'go west'
rotate_cw = 'rotate cw'
rotate_ccw = 'rotate ccw'
wait = 'wait'
def make_move(state):
""" Given a game state, decide on a move. """
print('AI making move for state: {}'.format(state))
return random.choice([go_north, go_south, go_east, go_west, rotate_cw, rotate_ccw])
<commit_msg>Add shoot to random AI<commit_after>
|
import random
# Move definitions
go_north = 'go north'
go_south = 'go south'
go_east = 'go east'
go_west = 'go west'
rotate_cw = 'rotate cw'
rotate_ccw = 'rotate ccw'
wait = 'wait'
shoot = 'shoot'
def make_move(state):
""" Given a game state, decide on a move. """
# TODO: Implement AI!
return random.choice([
go_north,
go_south,
go_east,
go_west,
rotate_cw,
rotate_ccw,
shoot,
])
|
29c68602396f04f57de587231b87c9e137d51412
|
celery/discovery.py
|
celery/discovery.py
|
import imp
from django.conf import settings
from django.core import exceptions
__all__ = ["autodiscover", "tasks_for_app", "find_related_module"]
def autodiscover():
"""Include tasks for all applications in settings.INSTALLED_APPS."""
return filter(None, [tasks_for_app(app)
for app in settings.INSTALLED_APPS])
def tasks_for_app(app):
"""Given an application name, imports any tasks.py file for that app."""
def found_tasks_module_handler(app_path, app_basename):
return __import__("%s.tasks" % app)
return find_related_module(app, "tasks", found_tasks_module_handler)
def find_related_module(app, related_name, handler):
"""Given an application name and a module name, tries to find that
module in the application, and running handler' if it finds it.
"""
# See django.contrib.admin.autodiscover for an explanation of this code.
try:
app_basename = app.split('.')[-1]
app_path = __import__(app, {}, {}, app_basename).__path__
except AttributeError:
return None
try:
imp.find_module(related_name, app_path)
except ImportError:
return None
return handler(app_path, app_basename)
|
from django.conf import settings
__all__ = ["autodiscover", "find_related_module"]
def autodiscover():
"""Include tasks for all applications in settings.INSTALLED_APPS."""
return filter(None, [find_related_module(app, "tasks")
for app in settings.INSTALLED_APPS])
def find_related_module(app, related_name):
"""Given an application name and a module name, tries to find that
module in the application, and running handler' if it finds it.
"""
try:
module = __import__(app, {}, {}, [related_name])
except ImportError:
return None
try:
related_module = getattr(module, related_name)
except AttributeError:
return None
return related_module
|
Make autodiscover() work with zipped eggs.
|
Make autodiscover() work with zipped eggs.
|
Python
|
bsd-3-clause
|
ask/celery,mitsuhiko/celery,ask/celery,WoLpH/celery,frac/celery,mitsuhiko/celery,frac/celery,cbrepo/celery,cbrepo/celery,WoLpH/celery
|
import imp
from django.conf import settings
from django.core import exceptions
__all__ = ["autodiscover", "tasks_for_app", "find_related_module"]
def autodiscover():
"""Include tasks for all applications in settings.INSTALLED_APPS."""
return filter(None, [tasks_for_app(app)
for app in settings.INSTALLED_APPS])
def tasks_for_app(app):
"""Given an application name, imports any tasks.py file for that app."""
def found_tasks_module_handler(app_path, app_basename):
return __import__("%s.tasks" % app)
return find_related_module(app, "tasks", found_tasks_module_handler)
def find_related_module(app, related_name, handler):
"""Given an application name and a module name, tries to find that
module in the application, and running handler' if it finds it.
"""
# See django.contrib.admin.autodiscover for an explanation of this code.
try:
app_basename = app.split('.')[-1]
app_path = __import__(app, {}, {}, app_basename).__path__
except AttributeError:
return None
try:
imp.find_module(related_name, app_path)
except ImportError:
return None
return handler(app_path, app_basename)
Make autodiscover() work with zipped eggs.
|
from django.conf import settings
__all__ = ["autodiscover", "find_related_module"]
def autodiscover():
"""Include tasks for all applications in settings.INSTALLED_APPS."""
return filter(None, [find_related_module(app, "tasks")
for app in settings.INSTALLED_APPS])
def find_related_module(app, related_name):
"""Given an application name and a module name, tries to find that
module in the application, and running handler' if it finds it.
"""
try:
module = __import__(app, {}, {}, [related_name])
except ImportError:
return None
try:
related_module = getattr(module, related_name)
except AttributeError:
return None
return related_module
|
<commit_before>import imp
from django.conf import settings
from django.core import exceptions
__all__ = ["autodiscover", "tasks_for_app", "find_related_module"]
def autodiscover():
"""Include tasks for all applications in settings.INSTALLED_APPS."""
return filter(None, [tasks_for_app(app)
for app in settings.INSTALLED_APPS])
def tasks_for_app(app):
"""Given an application name, imports any tasks.py file for that app."""
def found_tasks_module_handler(app_path, app_basename):
return __import__("%s.tasks" % app)
return find_related_module(app, "tasks", found_tasks_module_handler)
def find_related_module(app, related_name, handler):
"""Given an application name and a module name, tries to find that
module in the application, and running handler' if it finds it.
"""
# See django.contrib.admin.autodiscover for an explanation of this code.
try:
app_basename = app.split('.')[-1]
app_path = __import__(app, {}, {}, app_basename).__path__
except AttributeError:
return None
try:
imp.find_module(related_name, app_path)
except ImportError:
return None
return handler(app_path, app_basename)
<commit_msg>Make autodiscover() work with zipped eggs.<commit_after>
|
from django.conf import settings
__all__ = ["autodiscover", "find_related_module"]
def autodiscover():
"""Include tasks for all applications in settings.INSTALLED_APPS."""
return filter(None, [find_related_module(app, "tasks")
for app in settings.INSTALLED_APPS])
def find_related_module(app, related_name):
"""Given an application name and a module name, tries to find that
module in the application, and running handler' if it finds it.
"""
try:
module = __import__(app, {}, {}, [related_name])
except ImportError:
return None
try:
related_module = getattr(module, related_name)
except AttributeError:
return None
return related_module
|
1d1fec3287abbddfb376ff1fcbcc85bbcf0b44a2
|
pyoanda/tests/test_client.py
|
pyoanda/tests/test_client.py
|
import unittest
from ..client import Client
class TestClient(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_connect(self):
pass
|
import unittest
from unittest.mock import patch
from ..client import Client
from ..exceptions import BadCredentials
class TestClient(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_connect_pass(self):
with patch.object(Client, '_Client__get_credentials', return_value=True) as mock_method:
c = Client(
"http://mydomain.com",
"http://mystreamingdomain.com",
"my_account",
"my_token"
)
def test_connect_fail(self):
with patch.object(Client, '_Client__get_credentials', return_value=False) as mock_method:
with self.assertRaises(BadCredentials):
c = Client(
"http://mydomain.com",
"http://mystreamingdomain.com",
"my_account",
"my_token"
)
|
Add very simple client creator validator
|
Add very simple client creator validator
|
Python
|
mit
|
MonoCloud/pyoanda,toloco/pyoanda,elyobo/pyoanda
|
import unittest
from ..client import Client
class TestClient(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_connect(self):
passAdd very simple client creator validator
|
import unittest
from unittest.mock import patch
from ..client import Client
from ..exceptions import BadCredentials
class TestClient(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_connect_pass(self):
with patch.object(Client, '_Client__get_credentials', return_value=True) as mock_method:
c = Client(
"http://mydomain.com",
"http://mystreamingdomain.com",
"my_account",
"my_token"
)
def test_connect_fail(self):
with patch.object(Client, '_Client__get_credentials', return_value=False) as mock_method:
with self.assertRaises(BadCredentials):
c = Client(
"http://mydomain.com",
"http://mystreamingdomain.com",
"my_account",
"my_token"
)
|
<commit_before>import unittest
from ..client import Client
class TestClient(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_connect(self):
pass<commit_msg>Add very simple client creator validator<commit_after>
|
import unittest
from unittest.mock import patch
from ..client import Client
from ..exceptions import BadCredentials
class TestClient(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_connect_pass(self):
with patch.object(Client, '_Client__get_credentials', return_value=True) as mock_method:
c = Client(
"http://mydomain.com",
"http://mystreamingdomain.com",
"my_account",
"my_token"
)
def test_connect_fail(self):
with patch.object(Client, '_Client__get_credentials', return_value=False) as mock_method:
with self.assertRaises(BadCredentials):
c = Client(
"http://mydomain.com",
"http://mystreamingdomain.com",
"my_account",
"my_token"
)
|
49a6a89d7666fc4369b034bcf79d3bd794a468c5
|
partner_industry_secondary/models/res_partner.py
|
partner_industry_secondary/models/res_partner.py
|
# Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models, _
class ResPartner(models.Model):
_inherit = 'res.partner'
industry_id = fields.Many2one(string='Main Industry')
secondary_industry_ids = fields.Many2many(
comodel_name='res.partner.industry', string="Secondary Industries",
domain="[('id', '!=', industry_id)]")
@api.constrains('industry_id', 'secondary_industry_ids')
def _check_industries(self):
if self.industry_id in self.secondary_industry_ids:
raise exceptions.ValidationError(
_('The main industry must be different '
'from the secondary industries.'))
|
# Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models, _
class ResPartner(models.Model):
_inherit = 'res.partner'
industry_id = fields.Many2one(string='Main Industry')
secondary_industry_ids = fields.Many2many(
comodel_name='res.partner.industry', string="Secondary Industries",
domain="[('id', '!=', industry_id)]")
@api.constrains('industry_id', 'secondary_industry_ids')
def _check_industries(self):
for partner in self:
if partner.industry_id in partner.secondary_industry_ids:
raise exceptions.ValidationError(
_('The main industry must be different '
'from the secondary industries.'))
|
Make api constrains multi to avoid error when create a company with 2 contacts
|
[FIX] partner_industry_Secondary: Make api constrains multi to avoid error when create a company with 2 contacts
|
Python
|
agpl-3.0
|
Vauxoo/partner-contact,Vauxoo/partner-contact
|
# Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models, _
class ResPartner(models.Model):
_inherit = 'res.partner'
industry_id = fields.Many2one(string='Main Industry')
secondary_industry_ids = fields.Many2many(
comodel_name='res.partner.industry', string="Secondary Industries",
domain="[('id', '!=', industry_id)]")
@api.constrains('industry_id', 'secondary_industry_ids')
def _check_industries(self):
if self.industry_id in self.secondary_industry_ids:
raise exceptions.ValidationError(
_('The main industry must be different '
'from the secondary industries.'))
[FIX] partner_industry_Secondary: Make api constrains multi to avoid error when create a company with 2 contacts
|
# Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models, _
class ResPartner(models.Model):
_inherit = 'res.partner'
industry_id = fields.Many2one(string='Main Industry')
secondary_industry_ids = fields.Many2many(
comodel_name='res.partner.industry', string="Secondary Industries",
domain="[('id', '!=', industry_id)]")
@api.constrains('industry_id', 'secondary_industry_ids')
def _check_industries(self):
for partner in self:
if partner.industry_id in partner.secondary_industry_ids:
raise exceptions.ValidationError(
_('The main industry must be different '
'from the secondary industries.'))
|
<commit_before># Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models, _
class ResPartner(models.Model):
_inherit = 'res.partner'
industry_id = fields.Many2one(string='Main Industry')
secondary_industry_ids = fields.Many2many(
comodel_name='res.partner.industry', string="Secondary Industries",
domain="[('id', '!=', industry_id)]")
@api.constrains('industry_id', 'secondary_industry_ids')
def _check_industries(self):
if self.industry_id in self.secondary_industry_ids:
raise exceptions.ValidationError(
_('The main industry must be different '
'from the secondary industries.'))
<commit_msg>[FIX] partner_industry_Secondary: Make api constrains multi to avoid error when create a company with 2 contacts<commit_after>
|
# Copyright 2015 Antiun Ingenieria S.L. - Javier Iniesta
# Copyright 2016 Tecnativa S.L. - Vicent Cubells
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, exceptions, fields, models, _
class ResPartner(models.Model):
_inherit = 'res.partner'
industry_id = fields.Many2one(string='Main Industry')
secondary_industry_ids = fields.Many2many(
comodel_name='res.partner.industry', string="Secondary Industries",
domain="[('id', '!=', industry_id)]")
@api.constrains('industry_id', 'secondary_industry_ids')
def _check_industries(self):
for partner in self:
if partner.industry_id in partner.secondary_industry_ids:
raise exceptions.ValidationError(
_('The main industry must be different '
'from the secondary industries.'))
|
9efa2ccc3067d20dc50fd5e3746b291cc670af90
|
rembed/test/response_test.py
|
rembed/test/response_test.py
|
from rembed import response
from hamcrest import *
import pytest
def test_should_load_from_dictionary():
dict = {'title' : 'Bees'}
oembed_response = response.OEmbedResponse(dict)
assert_that(oembed_response.title, equal_to('Bees'))
def test_response_should_be_immutable():
dict = {'title' : 'Bees'}
oembed_response = response.OEmbedResponse(dict)
with pytest.raises(TypeError):
oembed_response.title = 'Wasps'
|
from rembed import response
from hamcrest import *
import pytest
def test_should_load_from_dictionary():
dict = {'type' : 'link', 'version' : '1.0'}
oembed_response = response.OEmbedResponse(dict)
assert_that(oembed_response.type, equal_to('link'))
def test_response_should_be_immutable():
dict = {'type' : 'link', 'version' : '1.0'}
oembed_response = response.OEmbedResponse(dict)
with pytest.raises(TypeError):
oembed_response.type = 'photo'
|
Make test match spec more closely
|
Make test match spec more closely
|
Python
|
mit
|
tino/pyembed,pyembed/pyembed,pyembed/pyembed
|
from rembed import response
from hamcrest import *
import pytest
def test_should_load_from_dictionary():
dict = {'title' : 'Bees'}
oembed_response = response.OEmbedResponse(dict)
assert_that(oembed_response.title, equal_to('Bees'))
def test_response_should_be_immutable():
dict = {'title' : 'Bees'}
oembed_response = response.OEmbedResponse(dict)
with pytest.raises(TypeError):
oembed_response.title = 'Wasps'Make test match spec more closely
|
from rembed import response
from hamcrest import *
import pytest
def test_should_load_from_dictionary():
dict = {'type' : 'link', 'version' : '1.0'}
oembed_response = response.OEmbedResponse(dict)
assert_that(oembed_response.type, equal_to('link'))
def test_response_should_be_immutable():
dict = {'type' : 'link', 'version' : '1.0'}
oembed_response = response.OEmbedResponse(dict)
with pytest.raises(TypeError):
oembed_response.type = 'photo'
|
<commit_before>from rembed import response
from hamcrest import *
import pytest
def test_should_load_from_dictionary():
dict = {'title' : 'Bees'}
oembed_response = response.OEmbedResponse(dict)
assert_that(oembed_response.title, equal_to('Bees'))
def test_response_should_be_immutable():
dict = {'title' : 'Bees'}
oembed_response = response.OEmbedResponse(dict)
with pytest.raises(TypeError):
oembed_response.title = 'Wasps'<commit_msg>Make test match spec more closely<commit_after>
|
from rembed import response
from hamcrest import *
import pytest
def test_should_load_from_dictionary():
dict = {'type' : 'link', 'version' : '1.0'}
oembed_response = response.OEmbedResponse(dict)
assert_that(oembed_response.type, equal_to('link'))
def test_response_should_be_immutable():
dict = {'type' : 'link', 'version' : '1.0'}
oembed_response = response.OEmbedResponse(dict)
with pytest.raises(TypeError):
oembed_response.type = 'photo'
|
ca6d80429cb8ccdac7669b444e5b4d2e88aed098
|
site/cgi/csv-columns.py
|
site/cgi/csv-columns.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Give back the columns of a CSV and the in
# http://www.tutorialspoint.com/python/python_cgi_programming.htm
import cgi
import csv
import sys
import codecs
import cgitb
CSV_DIR = '../csv/' # CSV upload directory
# UTF-8 hack
# from http://stackoverflow.com/a/11764727
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
# If you need input too, read from char_stream as you would sys.stdin
#char_stream = codecs.getreader('utf-8')(sys.stdin)
# python 2.x sys.stdout.encoding by default is None
# better option would be setting os.environ.get('PYTHONIOENCODING') to UTF-8
cgitb.enable() # pretty debugging
form = cgi.FieldStorage()
filename = form.getvalue('dataset')
f = open(CSV_DIR + filename, 'r')
r = csv.reader(f, dialect=csv.excel) # Create CSV row reader
col_names = next(r)
c2 = [ n.encode('utf-8') for n in col_names ]
response = { 'columns' : c2 }
print '''\
Status: 200\r
Content-Type: application/json;charset=UTF-8\r
\r
{ 'columns' : [%s] }\r
''' % ( "'" + "','".join(col_names).encode('utf-8') + "'", )
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Give back the columns of a CSV and the in
# http://www.tutorialspoint.com/python/python_cgi_programming.htm
import cgi
import csv
import sys
import codecs
import cgitb
CSV_DIR = '../csv/' # CSV upload directory
# UTF-8 hack
# from http://stackoverflow.com/a/11764727
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
# If you need input too, read from char_stream as you would sys.stdin
#char_stream = codecs.getreader('utf-8')(sys.stdin)
# python 2.x sys.stdout.encoding by default is None
# better option would be setting os.environ.get('PYTHONIOENCODING') to UTF-8
cgitb.enable() # pretty debugging
form = cgi.FieldStorage()
filename = form.getvalue('dataset')
f = open(CSV_DIR + filename, 'r')
r = csv.reader(f, dialect=csv.excel) # Create CSV row reader
col_names = next(r)
print '''\
Status: 200\r
Content-Type: application/json;charset=UTF-8\r
\r
{ "columns" : [%s] }\r
''' % ( '"' + '","'.join(col_names).encode('utf-8') + '"', )
|
Fix column listing, use double quotes for JSON remove old stuff
|
Fix column listing, use double quotes for JSON remove old stuff
|
Python
|
agpl-3.0
|
alejosanchez/CSVBenford,alejosanchez/CSVBenford
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Give back the columns of a CSV and the in
# http://www.tutorialspoint.com/python/python_cgi_programming.htm
import cgi
import csv
import sys
import codecs
import cgitb
CSV_DIR = '../csv/' # CSV upload directory
# UTF-8 hack
# from http://stackoverflow.com/a/11764727
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
# If you need input too, read from char_stream as you would sys.stdin
#char_stream = codecs.getreader('utf-8')(sys.stdin)
# python 2.x sys.stdout.encoding by default is None
# better option would be setting os.environ.get('PYTHONIOENCODING') to UTF-8
cgitb.enable() # pretty debugging
form = cgi.FieldStorage()
filename = form.getvalue('dataset')
f = open(CSV_DIR + filename, 'r')
r = csv.reader(f, dialect=csv.excel) # Create CSV row reader
col_names = next(r)
c2 = [ n.encode('utf-8') for n in col_names ]
response = { 'columns' : c2 }
print '''\
Status: 200\r
Content-Type: application/json;charset=UTF-8\r
\r
{ 'columns' : [%s] }\r
''' % ( "'" + "','".join(col_names).encode('utf-8') + "'", )
Fix column listing, use double quotes for JSON remove old stuff
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Give back the columns of a CSV and the in
# http://www.tutorialspoint.com/python/python_cgi_programming.htm
import cgi
import csv
import sys
import codecs
import cgitb
CSV_DIR = '../csv/' # CSV upload directory
# UTF-8 hack
# from http://stackoverflow.com/a/11764727
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
# If you need input too, read from char_stream as you would sys.stdin
#char_stream = codecs.getreader('utf-8')(sys.stdin)
# python 2.x sys.stdout.encoding by default is None
# better option would be setting os.environ.get('PYTHONIOENCODING') to UTF-8
cgitb.enable() # pretty debugging
form = cgi.FieldStorage()
filename = form.getvalue('dataset')
f = open(CSV_DIR + filename, 'r')
r = csv.reader(f, dialect=csv.excel) # Create CSV row reader
col_names = next(r)
print '''\
Status: 200\r
Content-Type: application/json;charset=UTF-8\r
\r
{ "columns" : [%s] }\r
''' % ( '"' + '","'.join(col_names).encode('utf-8') + '"', )
|
<commit_before>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Give back the columns of a CSV and the in
# http://www.tutorialspoint.com/python/python_cgi_programming.htm
import cgi
import csv
import sys
import codecs
import cgitb
CSV_DIR = '../csv/' # CSV upload directory
# UTF-8 hack
# from http://stackoverflow.com/a/11764727
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
# If you need input too, read from char_stream as you would sys.stdin
#char_stream = codecs.getreader('utf-8')(sys.stdin)
# python 2.x sys.stdout.encoding by default is None
# better option would be setting os.environ.get('PYTHONIOENCODING') to UTF-8
cgitb.enable() # pretty debugging
form = cgi.FieldStorage()
filename = form.getvalue('dataset')
f = open(CSV_DIR + filename, 'r')
r = csv.reader(f, dialect=csv.excel) # Create CSV row reader
col_names = next(r)
c2 = [ n.encode('utf-8') for n in col_names ]
response = { 'columns' : c2 }
print '''\
Status: 200\r
Content-Type: application/json;charset=UTF-8\r
\r
{ 'columns' : [%s] }\r
''' % ( "'" + "','".join(col_names).encode('utf-8') + "'", )
<commit_msg>Fix column listing, use double quotes for JSON remove old stuff<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Give back the columns of a CSV and the in
# http://www.tutorialspoint.com/python/python_cgi_programming.htm
import cgi
import csv
import sys
import codecs
import cgitb
CSV_DIR = '../csv/' # CSV upload directory
# UTF-8 hack
# from http://stackoverflow.com/a/11764727
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
# If you need input too, read from char_stream as you would sys.stdin
#char_stream = codecs.getreader('utf-8')(sys.stdin)
# python 2.x sys.stdout.encoding by default is None
# better option would be setting os.environ.get('PYTHONIOENCODING') to UTF-8
cgitb.enable() # pretty debugging
form = cgi.FieldStorage()
filename = form.getvalue('dataset')
f = open(CSV_DIR + filename, 'r')
r = csv.reader(f, dialect=csv.excel) # Create CSV row reader
col_names = next(r)
print '''\
Status: 200\r
Content-Type: application/json;charset=UTF-8\r
\r
{ "columns" : [%s] }\r
''' % ( '"' + '","'.join(col_names).encode('utf-8') + '"', )
|
8d40e514228e93af50fbe5264b15f79c2832de46
|
plantcv/plantcv/plot_image.py
|
plantcv/plantcv/plot_image.py
|
# Plot image to screen
import os
import cv2
import numpy as np
def plot_image(img, cmap=None):
"""Plot an image to the screen.
:param img: numpy.ndarray
:param cmap: str
:return:
"""
import matplotlib
matplotlib.use('Agg', warn=False)
from matplotlib import pyplot as plt
dimensions = np.shape(img)
# If the image is color then OpenCV stores it as BGR, we plot it as RGB
if len(dimensions) == 3:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
elif cmap is None and len(dimensions) == 2:
plt.imshow(img, cmap="gray")
plt.show()
elif cmap is not None and len(dimensions) == 2:
plt.imshow(img, cmap=cmap)
plt.show()
|
# Plot image to screen
import os
import cv2
import numpy as np
def plot_image(img, cmap=None):
"""Plot an image to the screen.
:param img: numpy.ndarray
:param cmap: str
:return:
"""
from matplotlib import pyplot as plt
dimensions = np.shape(img)
# If the image is color then OpenCV stores it as BGR, we plot it as RGB
if len(dimensions) == 3:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
elif cmap is None and len(dimensions) == 2:
plt.imshow(img, cmap="gray")
plt.show()
elif cmap is not None and len(dimensions) == 2:
plt.imshow(img, cmap=cmap)
plt.show()
|
Remove Agg backend in plot
|
Remove Agg backend in plot
Looks like applying the Agg backend in the plot_image function disables displaying images in Jupyter because the Agg backend is a non-GUI backend.
|
Python
|
mit
|
danforthcenter/plantcv,danforthcenter/plantcv,stiphyMT/plantcv,stiphyMT/plantcv,stiphyMT/plantcv,danforthcenter/plantcv
|
# Plot image to screen
import os
import cv2
import numpy as np
def plot_image(img, cmap=None):
"""Plot an image to the screen.
:param img: numpy.ndarray
:param cmap: str
:return:
"""
import matplotlib
matplotlib.use('Agg', warn=False)
from matplotlib import pyplot as plt
dimensions = np.shape(img)
# If the image is color then OpenCV stores it as BGR, we plot it as RGB
if len(dimensions) == 3:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
elif cmap is None and len(dimensions) == 2:
plt.imshow(img, cmap="gray")
plt.show()
elif cmap is not None and len(dimensions) == 2:
plt.imshow(img, cmap=cmap)
plt.show()
Remove Agg backend in plot
Looks like applying the Agg backend in the plot_image function disables displaying images in Jupyter because the Agg backend is a non-GUI backend.
|
# Plot image to screen
import os
import cv2
import numpy as np
def plot_image(img, cmap=None):
"""Plot an image to the screen.
:param img: numpy.ndarray
:param cmap: str
:return:
"""
from matplotlib import pyplot as plt
dimensions = np.shape(img)
# If the image is color then OpenCV stores it as BGR, we plot it as RGB
if len(dimensions) == 3:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
elif cmap is None and len(dimensions) == 2:
plt.imshow(img, cmap="gray")
plt.show()
elif cmap is not None and len(dimensions) == 2:
plt.imshow(img, cmap=cmap)
plt.show()
|
<commit_before># Plot image to screen
import os
import cv2
import numpy as np
def plot_image(img, cmap=None):
"""Plot an image to the screen.
:param img: numpy.ndarray
:param cmap: str
:return:
"""
import matplotlib
matplotlib.use('Agg', warn=False)
from matplotlib import pyplot as plt
dimensions = np.shape(img)
# If the image is color then OpenCV stores it as BGR, we plot it as RGB
if len(dimensions) == 3:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
elif cmap is None and len(dimensions) == 2:
plt.imshow(img, cmap="gray")
plt.show()
elif cmap is not None and len(dimensions) == 2:
plt.imshow(img, cmap=cmap)
plt.show()
<commit_msg>Remove Agg backend in plot
Looks like applying the Agg backend in the plot_image function disables displaying images in Jupyter because the Agg backend is a non-GUI backend.<commit_after>
|
# Plot image to screen
import os
import cv2
import numpy as np
def plot_image(img, cmap=None):
"""Plot an image to the screen.
:param img: numpy.ndarray
:param cmap: str
:return:
"""
from matplotlib import pyplot as plt
dimensions = np.shape(img)
# If the image is color then OpenCV stores it as BGR, we plot it as RGB
if len(dimensions) == 3:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
elif cmap is None and len(dimensions) == 2:
plt.imshow(img, cmap="gray")
plt.show()
elif cmap is not None and len(dimensions) == 2:
plt.imshow(img, cmap=cmap)
plt.show()
|
b11750b83e0fe99bb3c0a058d88ca21d0a64c332
|
data/load_data.py
|
data/load_data.py
|
import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoints." % count
|
import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
if len(points) > 50:
DataPoint.objects.bulk_create(points)
points = []
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoint(s)." % count
|
Add fix for sqlite bulk_create breaking with loading lots of points
|
Add fix for sqlite bulk_create breaking with loading lots of points
|
Python
|
mit
|
crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp
|
import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoints." % countAdd fix for sqlite bulk_create breaking with loading lots of points
|
import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
if len(points) > 50:
DataPoint.objects.bulk_create(points)
points = []
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoint(s)." % count
|
<commit_before>import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoints." % count<commit_msg>Add fix for sqlite bulk_create breaking with loading lots of points<commit_after>
|
import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
if len(points) > 50:
DataPoint.objects.bulk_create(points)
points = []
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoint(s)." % count
|
import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
    print "Added %d datapoints." % count
Add fix for sqlite bulk_create breaking with loading lots of points
import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
if len(points) > 50:
DataPoint.objects.bulk_create(points)
points = []
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoint(s)." % count
|
<commit_before>import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoints." % count<commit_msg>Add fix for sqlite bulk_create breaking with loading lots of points<commit_after>import csv
from chemtools.ml import get_decay_feature_vector
from chemtools.mol_name import get_exact_name
from models import DataPoint
def main(path):
with open(path, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
count = 0
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
try:
decay_feature = get_decay_feature_vector(exact_name)
except:
decay_feature = None
except:
exact_name = None
decay_feature = None
point = DataPoint(
name=row[1], options=row[4],
homo=row[5], lumo=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name,
decay_feature=decay_feature)
point.clean_fields()
points.append(point)
count += 1
if len(points) > 50:
DataPoint.objects.bulk_create(points)
points = []
except Exception as e:
pass
DataPoint.objects.bulk_create(points)
print "Added %d datapoint(s)." % count
|
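The record above works around SQLite's per-statement variable limit by flushing bulk_create every 50 objects. Below is a minimal sketch of that batching pattern only; the DataPoint model and the chunk size of 50 are assumptions carried over for illustration, and recent Django versions can achieve the same effect with bulk_create(objs, batch_size=...).
# Sketch only: chunked bulk_create so SQLite never sees too many bound variables.
def bulk_create_in_batches(model, objects, batch_size=50):
    buffered = []
    for obj in objects:
        buffered.append(obj)
        if len(buffered) >= batch_size:
            model.objects.bulk_create(buffered)
            buffered = []
    if buffered:
        # flush whatever is left after the last full batch
        model.objects.bulk_create(buffered)
# Usage (hypothetical): bulk_create_in_batches(DataPoint, points)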
806b19db6f50d63f5b0893e9d695f32830890dd2
|
crm/tests/test_contact_user.py
|
crm/tests/test_contact_user.py
|
from django.contrib.auth.models import User
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
|
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
def test_one_contact_per_user(self):
"""Make sure a user can only link to one contact"""
fred = make_user('fred')
jsmith = make_contact('jsmith', 'John Smith')
pkimber = make_contact('pkimber', 'Patrick Kimber')
make_user_contact(fred, pkimber)
self.assertRaises(
IntegrityError,
make_user_contact,
fred,
jsmith,
)
|
Make sure a user can only link to one contact
|
Make sure a user can only link to one contact
|
Python
|
apache-2.0
|
pkimber/crm,pkimber/crm,pkimber/crm
|
from django.contrib.auth.models import User
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
Make sure a user can only link to one contact
|
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
def test_one_contact_per_user(self):
"""Make sure a user can only link to one contact"""
fred = make_user('fred')
jsmith = make_contact('jsmith', 'John Smith')
pkimber = make_contact('pkimber', 'Patrick Kimber')
make_user_contact(fred, pkimber)
self.assertRaises(
IntegrityError,
make_user_contact,
fred,
jsmith,
)
|
<commit_before>from django.contrib.auth.models import User
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
<commit_msg>Make sure a user can only link to one contact<commit_after>
|
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
def test_one_contact_per_user(self):
"""Make sure a user can only link to one contact"""
fred = make_user('fred')
jsmith = make_contact('jsmith', 'John Smith')
pkimber = make_contact('pkimber', 'Patrick Kimber')
make_user_contact(fred, pkimber)
self.assertRaises(
IntegrityError,
make_user_contact,
fred,
jsmith,
)
|
from django.contrib.auth.models import User
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
Make sure a user can only link to one contact
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
def test_one_contact_per_user(self):
"""Make sure a user can only link to one contact"""
fred = make_user('fred')
jsmith = make_contact('jsmith', 'John Smith')
pkimber = make_contact('pkimber', 'Patrick Kimber')
make_user_contact(fred, pkimber)
self.assertRaises(
IntegrityError,
make_user_contact,
fred,
jsmith,
)
|
<commit_before>from django.contrib.auth.models import User
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
<commit_msg>Make sure a user can only link to one contact<commit_after>from django.contrib.auth.models import User
from django.db import IntegrityError
from django.test import TestCase
from crm.tests.model_maker import (
make_contact,
make_user_contact,
)
from login.tests.model_maker import make_user
class TestContactUser(TestCase):
def test_link_user_to_contact(self):
"""Create a contact and link it to a user"""
contact = make_contact(
'pkimber',
'Patrick Kimber',
)
make_user_contact(make_user('fred'), contact)
user = User.objects.get(username='fred')
user_contacts = user.usercontact_set.all()
self.assertIn('Kimber', user_contacts[0].contact.name)
def test_one_contact_per_user(self):
"""Make sure a user can only link to one contact"""
fred = make_user('fred')
jsmith = make_contact('jsmith', 'John Smith')
pkimber = make_contact('pkimber', 'Patrick Kimber')
make_user_contact(fred, pkimber)
self.assertRaises(
IntegrityError,
make_user_contact,
fred,
jsmith,
)
|
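The new test in the record above expects a second make_user_contact call for the same user to raise IntegrityError, which only holds if the link model enforces uniqueness on the user column. The sketch below is hypothetical; the field names and the 'crm.Contact' reference are assumptions, since the real crm models are not part of the record.
from django.conf import settings
from django.db import models

class UserContact(models.Model):
    # A OneToOneField (or ForeignKey(..., unique=True)) puts a UNIQUE constraint
    # on user_id, so linking the same user to a second contact raises IntegrityError.
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    contact = models.ForeignKey('crm.Contact', on_delete=models.CASCADE)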
7613ec7e2bc9dbd3354934d4fbe8f689a5b89eed
|
cyborg/accelerator/__init__.py
|
cyborg/accelerator/__init__.py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'nomad').version_string()
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'cyborg').version_string()
|
Replace all occurences of Nomad with Cyborg
|
Replace all occurences of Nomad with Cyborg
This patch proposes to replace all occurences of
Nomad with Cyborg.
Change-Id: I48a5fc13fd8faa682736d44e6c8cd96eeedaf2e3
|
Python
|
apache-2.0
|
openstack/nomad
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'nomad').version_string()
Replace all occurences of Nomad with Cyborg
This patch proposes to replace all occurences of
Nomad with Cyborg.
Change-Id: I48a5fc13fd8faa682736d44e6c8cd96eeedaf2e3
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'cyborg').version_string()
|
<commit_before># -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'nomad').version_string()
<commit_msg>Replace all occurences of Nomad with Cyborg
This patch proposes to replace all occurences of
Nomad with Cyborg.
Change-Id: I48a5fc13fd8faa682736d44e6c8cd96eeedaf2e3<commit_after>
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'cyborg').version_string()
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'nomad').version_string()
Replace all occurences of Nomad with Cyborg
This patch proposes to replace all occurences of
Nomad with Cyborg.
Change-Id: I48a5fc13fd8faa682736d44e6c8cd96eeedaf2e3
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'cyborg').version_string()
|
<commit_before># -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'nomad').version_string()
<commit_msg>Replace all occurences of Nomad with Cyborg
This patch proposes to replace all occurences of
Nomad with Cyborg.
Change-Id: I48a5fc13fd8faa682736d44e6c8cd96eeedaf2e3<commit_after># -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'cyborg').version_string()
|
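The only functional change in the record above is the distribution name passed to pbr: VersionInfo looks the version up by that name in the package metadata, so it has to match the name declared in setup.cfg, hence 'nomad' to 'cyborg'. A quick sanity check might look like the sketch below, assuming the project is installed under the name 'cyborg'.
import pbr.version

if __name__ == "__main__":
    # Prints the resolved version string, e.g. "0.1.0", once the metadata name matches.
    print(pbr.version.VersionInfo('cyborg').version_string())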
02cfc3a53bf3f38dc676359eace0e258bfda682a
|
plotly/graph_objs/__init__.py
|
plotly/graph_objs/__init__.py
|
"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
"""
from __future__ import absolute_import
from plotly.graph_objs.graph_objs import *
__all__ = ["Data",
"Annotations",
"Area",
"Bar",
"Box",
"Contour",
"Heatmap",
"Histogram",
"Histogram2d",
"Histogram2dContour",
"Scatter",
"Annotation",
"AngularAxis",
"ColorBar",
"Contours",
"ErrorX",
"ErrorY",
"Figure",
"Font",
"Layout",
"Legend",
"Line",
"Margin",
"Marker",
"RadialAxis",
"Stream",
"Trace",
"XAxis",
"XBins",
"YAxis",
"YBins"]
|
"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
The reason for the package graph_objs and the module graph_objs is to provide
a clearer API for users.
"""
from . graph_objs import (
Data, Annotations, Area, Bar, Box, Contour, Heatmap, Histogram,
Histogram2d, Histogram2dContour, Scatter, Annotation, AngularAxis,
ColorBar, Contours, ErrorX, ErrorY, Figure, Font, Layout, Legend,
Line, Margin, Marker, RadialAxis, Stream, Trace, XAxis, XBins, YAxis,
YBins
)
|
Remove the `import *`. Explicitly import what we want to expose.
|
Remove the `import *`. Explicitly import what we want to expose.
Before __all__ was used to manage these. But it's clearer to
just import what we want users to see.
|
Python
|
mit
|
plotly/plotly.py,plotly/python-api,plotly/plotly.py,ee-in/python-api,plotly/plotly.py,ee-in/python-api,plotly/python-api,ee-in/python-api,plotly/python-api
|
"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
"""
from __future__ import absolute_import
from plotly.graph_objs.graph_objs import *
__all__ = ["Data",
"Annotations",
"Area",
"Bar",
"Box",
"Contour",
"Heatmap",
"Histogram",
"Histogram2d",
"Histogram2dContour",
"Scatter",
"Annotation",
"AngularAxis",
"ColorBar",
"Contours",
"ErrorX",
"ErrorY",
"Figure",
"Font",
"Layout",
"Legend",
"Line",
"Margin",
"Marker",
"RadialAxis",
"Stream",
"Trace",
"XAxis",
"XBins",
"YAxis",
"YBins"]
Remove the `import *`. Explicitly import what we want to expose.
Before __all__ was used to manage these. But it's clearer to
just import what we want users to see.
|
"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
The reason for the package graph_objs and the module graph_objs is to provide
a clearer API for users.
"""
from . graph_objs import (
Data, Annotations, Area, Bar, Box, Contour, Heatmap, Histogram,
Histogram2d, Histogram2dContour, Scatter, Annotation, AngularAxis,
ColorBar, Contours, ErrorX, ErrorY, Figure, Font, Layout, Legend,
Line, Margin, Marker, RadialAxis, Stream, Trace, XAxis, XBins, YAxis,
YBins
)
|
<commit_before>"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
"""
from __future__ import absolute_import
from plotly.graph_objs.graph_objs import *
__all__ = ["Data",
"Annotations",
"Area",
"Bar",
"Box",
"Contour",
"Heatmap",
"Histogram",
"Histogram2d",
"Histogram2dContour",
"Scatter",
"Annotation",
"AngularAxis",
"ColorBar",
"Contours",
"ErrorX",
"ErrorY",
"Figure",
"Font",
"Layout",
"Legend",
"Line",
"Margin",
"Marker",
"RadialAxis",
"Stream",
"Trace",
"XAxis",
"XBins",
"YAxis",
"YBins"]
<commit_msg>Remove the `import *`. Explicitly import what we want to expose.
Before __all__ was used to manage these. But it's clearer to
just import what we want users to see.<commit_after>
|
"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
The reason for the package graph_objs and the module graph_objs is to provide
a clearer API for users.
"""
from . graph_objs import (
Data, Annotations, Area, Bar, Box, Contour, Heatmap, Histogram,
Histogram2d, Histogram2dContour, Scatter, Annotation, AngularAxis,
ColorBar, Contours, ErrorX, ErrorY, Figure, Font, Layout, Legend,
Line, Margin, Marker, RadialAxis, Stream, Trace, XAxis, XBins, YAxis,
YBins
)
|
"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
"""
from __future__ import absolute_import
from plotly.graph_objs.graph_objs import *
__all__ = ["Data",
"Annotations",
"Area",
"Bar",
"Box",
"Contour",
"Heatmap",
"Histogram",
"Histogram2d",
"Histogram2dContour",
"Scatter",
"Annotation",
"AngularAxis",
"ColorBar",
"Contours",
"ErrorX",
"ErrorY",
"Figure",
"Font",
"Layout",
"Legend",
"Line",
"Margin",
"Marker",
"RadialAxis",
"Stream",
"Trace",
"XAxis",
"XBins",
"YAxis",
"YBins"]
Remove the `import *`. Explicitly import what we want to expose.
Before __all__ was used to manage these. But it's clearer to
just import what we want users to see.
"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
The reason for the package graph_objs and the module graph_objs is to provide
a clearer API for users.
"""
from . graph_objs import (
Data, Annotations, Area, Bar, Box, Contour, Heatmap, Histogram,
Histogram2d, Histogram2dContour, Scatter, Annotation, AngularAxis,
ColorBar, Contours, ErrorX, ErrorY, Figure, Font, Layout, Legend,
Line, Margin, Marker, RadialAxis, Stream, Trace, XAxis, XBins, YAxis,
YBins
)
|
<commit_before>"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
"""
from __future__ import absolute_import
from plotly.graph_objs.graph_objs import *
__all__ = ["Data",
"Annotations",
"Area",
"Bar",
"Box",
"Contour",
"Heatmap",
"Histogram",
"Histogram2d",
"Histogram2dContour",
"Scatter",
"Annotation",
"AngularAxis",
"ColorBar",
"Contours",
"ErrorX",
"ErrorY",
"Figure",
"Font",
"Layout",
"Legend",
"Line",
"Margin",
"Marker",
"RadialAxis",
"Stream",
"Trace",
"XAxis",
"XBins",
"YAxis",
"YBins"]
<commit_msg>Remove the `import *`. Explicitly import what we want to expose.
Before __all__ was used to manage these. But it's clearer to
just import what we want users to see.<commit_after>"""
graph_objs
==========
This package imports definitions for all of Plotly's graph objects. For more
information, run help(Obj) on any of the following objects defined here.
The reason for the package graph_objs and the module graph_objs is to provide
a clearer API for users.
"""
from . graph_objs import (
Data, Annotations, Area, Bar, Box, Contour, Heatmap, Histogram,
Histogram2d, Histogram2dContour, Scatter, Annotation, AngularAxis,
ColorBar, Contours, ErrorX, ErrorY, Figure, Font, Layout, Legend,
Line, Margin, Marker, RadialAxis, Stream, Trace, XAxis, XBins, YAxis,
YBins
)
|
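The pattern in the record above, explicit names in the package __init__ instead of import * plus __all__, generalises to any package. A small sketch with made-up module and class names (not plotly's actual layout):
# mypkg/__init__.py  (illustrative names only)
from .graphs import Figure, Layout, Scatter

# Optional: __all__ can still be defined to keep wildcard imports of mypkg narrow.
__all__ = ["Figure", "Layout", "Scatter"]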
601fe6fd1fc2f34f7cefe2fac0ff343144d139cc
|
src/ipf/ipfblock/rgb2gray.py
|
src/ipf/ipfblock/rgb2gray.py
|
# -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return IPFImage3cType.convert(self.output_ports["output_image"]._value)
|
# -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return self.output_ports["output_image"]._value
|
Change get_preview_image to same as other blocks (because we fix ipl to pil convert for 1-channel images)
|
Change get_preview_image to same as other blocks (because we fix ipl to pil convert for 1-channel images)
|
Python
|
lgpl-2.1
|
anton-golubkov/Garland,anton-golubkov/Garland
|
# -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return IPFImage3cType.convert(self.output_ports["output_image"]._value)
Change get_preview_image to same as other blocks (because we fix ipl to pil convert for 1-channel images)
|
# -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return self.output_ports["output_image"]._value
|
<commit_before># -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return IPFImage3cType.convert(self.output_ports["output_image"]._value)
<commit_msg>Change get_preview_image to same as other blocks (because we fix ipl to pil convert for 1-channel images)<commit_after>
|
# -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return self.output_ports["output_image"]._value
|
# -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return IPFImage3cType.convert(self.output_ports["output_image"]._value)
Change get_preview_image to same as other blocks (because we fix ipl to pil convert for 1-channel images)
# -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return self.output_ports["output_image"]._value
|
<commit_before># -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return IPFImage3cType.convert(self.output_ports["output_image"]._value)
<commit_msg>Change get_preview_image to same as other blocks (because we fix ipl to pil convert for 1-channel images)<commit_after># -*- coding: utf-8 -*-
import ipfblock
import ioport
import ipf.ipfblock.processing
from ipf.ipftype.ipfimage3ctype import IPFImage3cType
from ipf.ipftype.ipfimage1ctype import IPFImage1cType
class RGB2Gray(ipfblock.IPFBlock):
""" Convert 3 channel image to 1 channel gray block class
"""
type = "RGB2Gray"
category = "Channel operations"
is_abstract_block = False
def __init__(self):
super(RGB2Gray, self).__init__()
self.input_ports["input_image"] = ioport.IPort(self, IPFImage3cType)
self.output_ports["output_image"] = ioport.OPort(self, IPFImage1cType)
self.processing_function = ipf.ipfblock.processing.rgb2gray
def get_preview_image(self):
return self.output_ports["output_image"]._value
|
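The record above simply stops re-converting the 1-channel output before previewing it, since the IPL-to-PIL conversion now handles single-channel images. Independent of the project's legacy OpenCV bindings, the underlying 3-channel-to-gray step looks like the sketch below with the modern cv2 API (this is not the project's code).
import cv2

def to_gray(image_bgr):
    # Collapse a 3-channel BGR image into a single gray channel.
    return cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)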
e5f81fddc738d3b5773e4b2c2b2397caa49eb513
|
client/httpd.py
|
client/httpd.py
|
"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=True)
|
"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=False)
|
Disable debug on sample client
|
Disable debug on sample client
|
Python
|
bsd-2-clause
|
PolyLAN/polybanking,PolyLAN/polybanking,PolyLAN/polybanking,PolyLAN/polybanking
|
"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=True)
Disable debug on sample client
|
"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=False)
|
<commit_before>"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=True)
<commit_msg>Disable debug on sample client<commit_after>
|
"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=False)
|
"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=True)
Disable debug on sample client
"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=False)
|
<commit_before>"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=True)
<commit_msg>Disable debug on sample client<commit_after>"""This example show a full http server.
"""
from flask import Flask
from flask import render_template
from flask import request
import config
from libs.polybanking import PolyBanking
import uuid
api = PolyBanking(config.POLYBANKING_SERVER, config.CONFIG_ID, config.KEY_REQUESTS, config.KEY_IPN, config.KEY_API)
app = Flask(__name__)
@app.route("/")
def home():
"""Display the home page"""
return render_template('home.html')
@app.route('/start')
def start():
"""Start a new paiement"""
(result, url) = api.new_transaction(request.args.get('amount', ''), str(uuid.uuid4()))
return render_template('start.html', result=result, url=url)
@app.route('/back')
def back():
transaction_list = api.get_transactions(max_transaction=3)
transaction_details = api.get_transaction(transaction_list[0]['reference'])
transaction_logs = api.get_transaction_logs(transaction_list[0]['reference'])
return render_template('back.html', result='ok' in request.args, last_transactions=transaction_list, last_transaction_detail=transaction_details, last_transaction_logs=transaction_logs)
@app.route('/ipn', methods=['POST'])
def ipn():
print api.check_ipn(request.form)
return ''
if __name__ == "__main__":
app.run(debug=False)
|
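Turning debug=True off matters because the Werkzeug debugger that Flask enables in debug mode allows arbitrary code execution from the browser, so it must never be reachable by others. A common alternative to hard-coding the flag is reading it from the environment; the sketch below uses an arbitrary variable name of my choosing.
import os
from flask import Flask

app = Flask(__name__)

if __name__ == "__main__":
    # Debug stays off unless explicitly requested, e.g. SAMPLE_CLIENT_DEBUG=1
    app.run(debug=os.environ.get("SAMPLE_CLIENT_DEBUG", "0") == "1")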
8834b22654574b71bb891570c77acf2f42eade06
|
lock_tokens/managers.py
|
lock_tokens/managers.py
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj):
contenttype = ContentType.objects.get_for_model(obj)
return self.get(locked_object_content_type=contenttype,
locked_object_id=obj.id,
locked_at__gte=get_oldest_valid_tokens_datetime())
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj, allow_expired=True):
contenttype = ContentType.objects.get_for_model(obj)
lookup_fields = {
'locked_object_content_type': contenttype,
'locked_object_id': obj.id
}
if not allow_expired:
lookup_fields['locked_at__gte'] = get_oldest_valid_tokens_datetime()
return self.get(**lookup_fields)
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj, allow_expired=False), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
|
Add 'allow_expired' parameter to LockTokenManager.get_for_object
|
Add 'allow_expired' parameter to LockTokenManager.get_for_object
|
Python
|
mit
|
rparent/django-lock-tokens,rparent/django-lock-tokens,rparent/django-lock-tokens
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj):
contenttype = ContentType.objects.get_for_model(obj)
return self.get(locked_object_content_type=contenttype,
locked_object_id=obj.id,
locked_at__gte=get_oldest_valid_tokens_datetime())
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
Add 'allow_expired' parameter to LockTokenManager.get_for_object
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj, allow_expired=True):
contenttype = ContentType.objects.get_for_model(obj)
lookup_fields = {
'locked_object_content_type': contenttype,
'locked_object_id': obj.id
}
if not allow_expired:
lookup_fields['locked_at__gte'] = get_oldest_valid_tokens_datetime()
return self.get(**lookup_fields)
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj, allow_expired=False), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
|
<commit_before>from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj):
contenttype = ContentType.objects.get_for_model(obj)
return self.get(locked_object_content_type=contenttype,
locked_object_id=obj.id,
locked_at__gte=get_oldest_valid_tokens_datetime())
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
<commit_msg>Add 'allow_expired' parameter to LockTokenManager.get_for_object<commit_after>
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj, allow_expired=True):
contenttype = ContentType.objects.get_for_model(obj)
lookup_fields = {
'locked_object_content_type': contenttype,
'locked_object_id': obj.id
}
if not allow_expired:
lookup_fields['locked_at__gte'] = get_oldest_valid_tokens_datetime()
return self.get(**lookup_fields)
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj, allow_expired=False), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj):
contenttype = ContentType.objects.get_for_model(obj)
return self.get(locked_object_content_type=contenttype,
locked_object_id=obj.id,
locked_at__gte=get_oldest_valid_tokens_datetime())
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
Add 'allow_expired' parameter to LockTokenManager.get_for_object
from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj, allow_expired=True):
contenttype = ContentType.objects.get_for_model(obj)
lookup_fields = {
'locked_object_content_type': contenttype,
'locked_object_id': obj.id
}
if not allow_expired:
lookup_fields['locked_at__gte'] = get_oldest_valid_tokens_datetime()
return self.get(**lookup_fields)
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj, allow_expired=False), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
|
<commit_before>from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj):
contenttype = ContentType.objects.get_for_model(obj)
return self.get(locked_object_content_type=contenttype,
locked_object_id=obj.id,
locked_at__gte=get_oldest_valid_tokens_datetime())
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
<commit_msg>Add 'allow_expired' parameter to LockTokenManager.get_for_object<commit_after>from django.contrib.contenttypes.models import ContentType
from django.db.models import Manager
from lock_tokens.utils import get_oldest_valid_tokens_datetime
class LockTokenManager(Manager):
def get_for_object(self, obj, allow_expired=True):
contenttype = ContentType.objects.get_for_model(obj)
lookup_fields = {
'locked_object_content_type': contenttype,
'locked_object_id': obj.id
}
if not allow_expired:
lookup_fields['locked_at__gte'] = get_oldest_valid_tokens_datetime()
return self.get(**lookup_fields)
def get_or_create_for_object(self, obj):
try:
return (self.get_for_object(obj, allow_expired=False), False)
except self.model.DoesNotExist:
return (self.create(locked_object=obj), True)
class LockableModelManager(Manager):
def get_and_lock(self, *args, **kwargs):
from lock_tokens.models import LockToken
obj = super(LockableModelManager, self).get(*args, **kwargs)
lock_token = LockToken.objects.create(obj)
return obj, lock_token.serialize()
|
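A minimal usage sketch for the allow_expired parameter introduced in this commit. It assumes lock_tokens is installed, that LockToken uses LockTokenManager as its default manager (as the snippet suggests), and that obj is an instance of some lockable Django model; the helper name is hypothetical.

from lock_tokens.models import LockToken

def describe_lock(obj):
    # Hypothetical helper: report whether obj is held by a still-valid token.
    try:
        token = LockToken.objects.get_for_object(obj, allow_expired=False)
        return 'actively locked: %s' % token
    except LockToken.DoesNotExist:
        pass
    try:
        # Default allow_expired=True also returns stale tokens.
        token = LockToken.objects.get_for_object(obj)
        return 'only an expired token remains: %s' % token
    except LockToken.DoesNotExist:
        return 'not locked'
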
e6e6918b54d691803c48f217f0074d5bcdd9df50
|
endpoint/csp.py
|
endpoint/csp.py
|
# -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("ksi-admin@fi.muni.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
|
# -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("me@apophis.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
|
Send CSP reports right to apophis.
|
Send CSP reports right to apophis.
|
Python
|
mit
|
fi-ksi/web-backend,fi-ksi/web-backend
|
# -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("ksi-admin@fi.muni.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
Send CSP reports right to apophis.
|
# -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("me@apophis.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
|
<commit_before># -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("ksi-admin@fi.muni.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
<commit_msg>Send CSP reports right to apophis.<commit_after>
|
# -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("me@apophis.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
|
# -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("ksi-admin@fi.muni.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
Send CSP reports right to apophis.
# -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("me@apophis.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
|
<commit_before># -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("ksi-admin@fi.muni.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
<commit_msg>Send CSP reports right to apophis.<commit_after># -*- coding: utf-8 -*-
import falcon, util, json, sys, traceback
# Content-security policy reports of frontend
# Every CSP report is forwarded to ksi-admin@fi.muni.cz.
# This is testing solution, if a lot of spam occurs, some intelligence should
# be added to this endpoint.
class CSP(object):
def on_post(self, req, resp):
data = json.loads(req.stream.read())
text = "<p>" + util.config.ksi_web() + \
"<br><pre>" + json.dumps(data, indent=4) + "</pre></p>" + \
util.mail.easteregg()
try:
util.mail.send("me@apophis.cz", "[KSI-WEB] CSP report", text.decode('utf-8'), )
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
req.context['result'] = {}
resp.status = falcon.HTTP_200
|
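A sketch of how a resource like CSP is typically mounted in a falcon application so that browser reports reach it; the '/csp' route and the app wiring are assumptions for illustration, not taken from this repository.

import falcon
from endpoint.csp import CSP  # module path taken from the file name above

app = falcon.API()            # renamed falcon.App() in falcon 3.x and later
app.add_route('/csp', CSP())

# With a frontend response header such as
#   Content-Security-Policy: default-src 'self'; report-uri /csp
# browsers will POST their violation reports to this resource.
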
c42856ffd6ab8a762ea095fbfbfd7705e1eabd51
|
ideascube/serveradmin/battery.py
|
ideascube/serveradmin/battery.py
|
import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return batteries.stat
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
|
import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return sorted(batteries.stat, key=lambda b: b.name.lower())
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
|
Order the batteries by name
|
settings: Order the batteries by name
Eventually we'll want to do better than this, but batinfo doesn't export
what we'd need to do better.
Moving to udev+upower would help, but that's probably something we
should do with cockpit anyway.
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return batteries.stat
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
settings: Order the batteries by name
Eventually we'll want to do better than this, but batinfo doesn't export
what we'd need to do better.
Moving to udev+upower would help, but that's probably something we
should do with cockpit anyway.
|
import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return sorted(batteries.stat, key=lambda b: b.name.lower())
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
|
<commit_before>import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return batteries.stat
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
<commit_msg>settings: Order the batteries by name
Eventually we'll want to do better than this, but batinfo doesn't export
what we'd need to do better.
Moving to udev+upower would help, but that's probably something we
should do with cockpit anyway.<commit_after>
|
import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return sorted(batteries.stat, key=lambda b: b.name.lower())
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
|
import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return batteries.stat
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
settings: Order the batteries by name
Eventually we'll want to do better than this, but batinfo doesn't export
what we'd need to do better.
Moving to udev+upower would help, but that's probably something we
should do with cockpit anyway.
import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return sorted(batteries.stat, key=lambda b: b.name.lower())
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
|
<commit_before>import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return batteries.stat
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
<commit_msg>settings: Order the batteries by name
Eventually we'll want to do better than this, but batinfo doesn't export
what we'd need to do better.
Moving to udev+upower would help, but that's probably something we
should do with cockpit anyway.<commit_after>import batinfo
class Lime2Battery(batinfo.Battery):
@property
def status(self):
if self.charging == 0:
return 'Discharging'
elif self.capacity < 100:
return 'Charging'
else:
return 'Full'
def get_batteries():
batteries = batinfo.batteries()
if batteries:
return sorted(batteries.stat, key=lambda b: b.name.lower())
try:
# We might be running on a Lime2 Koombook
# https://github.com/ideascube/ideascube/issues/446#issuecomment-244143565
return [Lime2Battery(path='/sys/power/axp_pmu', name='battery')]
except FileNotFoundError:
return []
|
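A standalone illustration of the name ordering used in get_batteries above; the namedtuple stands in for batinfo's battery objects, which only need a name attribute for the sort key.

from collections import namedtuple

Battery = namedtuple('Battery', 'name capacity')
stats = [Battery('CMB1', 80), Battery('BAT0', 95), Battery('bat1', 60)]

# Case-insensitive sort by name, as in the patched get_batteries().
ordered = sorted(stats, key=lambda b: b.name.lower())
assert [b.name for b in ordered] == ['BAT0', 'bat1', 'CMB1']
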
362827987bb6587e1440f5f3fa804635e426eb5f
|
windpowerlib/__init__.py
|
windpowerlib/__init__.py
|
__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import (
WindTurbine,
get_turbine_types,
create_power_curve,
) # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
|
__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import WindTurbine # noqa: F401
from .wind_turbine import get_turbine_types # noqa: F401
from .wind_turbine import create_power_curve # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
|
Use one line per import
|
Use one line per import
|
Python
|
mit
|
wind-python/windpowerlib
|
__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import (
WindTurbine,
get_turbine_types,
create_power_curve,
) # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
Use one line per import
|
__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import WindTurbine # noqa: F401
from .wind_turbine import get_turbine_types # noqa: F401
from .wind_turbine import create_power_curve # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
|
<commit_before>__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import (
WindTurbine,
get_turbine_types,
create_power_curve,
) # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
<commit_msg>Use one line per import<commit_after>
|
__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import WindTurbine # noqa: F401
from .wind_turbine import get_turbine_types # noqa: F401
from .wind_turbine import create_power_curve # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
|
__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import (
WindTurbine,
get_turbine_types,
create_power_curve,
) # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
Use one line per import
__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import WindTurbine # noqa: F401
from .wind_turbine import get_turbine_types # noqa: F401
from .wind_turbine import create_power_curve # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
|
<commit_before>__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import (
WindTurbine,
get_turbine_types,
create_power_curve,
) # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
<commit_msg>Use one line per import<commit_after>__copyright__ = "Copyright oemof developer group"
__license__ = "MIT"
__version__ = "0.2.1dev"
from .wind_turbine import WindTurbine # noqa: F401
from .wind_turbine import get_turbine_types # noqa: F401
from .wind_turbine import create_power_curve # noqa: F401
from .wind_farm import WindFarm # noqa: F401
from .wind_turbine_cluster import WindTurbineCluster # noqa: F401
from .modelchain import ModelChain # noqa: F401
from .turbine_cluster_modelchain import TurbineClusterModelChain # noqa: F401
|
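Either import style leaves the package's public API unchanged; a quick sanity check of the re-exported names (assumes windpowerlib is installed):

import windpowerlib

for name in ('WindTurbine', 'get_turbine_types', 'create_power_curve',
             'WindFarm', 'WindTurbineCluster', 'ModelChain',
             'TurbineClusterModelChain'):
    # Each name re-exported in __init__.py should be reachable at top level.
    assert hasattr(windpowerlib, name), name
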
372d03b25f21d363138ecf340816dd04fb33ef71
|
docs/conf.py
|
docs/conf.py
|
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
html_theme = 'classic'
|
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
Switch to RTD docs theme.
|
Switch to RTD docs theme.
|
Python
|
bsd-3-clause
|
ubernostrum/django-soapbox,ubernostrum/django-soapbox
|
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
html_theme = 'classic'
Switch to RTD docs theme.
|
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
<commit_before>extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
html_theme = 'classic'
<commit_msg>Switch to RTD docs theme.<commit_after>
|
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
html_theme = 'classic'
Switch to RTD docs theme.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
<commit_before>extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
html_theme = 'classic'
<commit_msg>Switch to RTD docs theme.<commit_after>import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-soapbox'
copyright = u'2011-2015, James Bennett'
version = '1.1'
release = '1.1'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
htmlhelp_basename = 'django-soapboxdoc'
latex_documents = [
('index', 'django-soapbox.tex', u'django-soapbox Documentation',
u'James Bennett', 'manual'),
]
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
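The conf.py above only imports sphinx_rtd_theme for local builds; Read the Docs sets the READTHEDOCS environment variable and supplies its own theme. A small check of which branch a given environment takes (illustration only; the theme package must be pip-installed for local builds):

import os

on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    print('building on Read the Docs; the platform theme is used')
else:
    import sphinx_rtd_theme
    print('local build; theme files in', sphinx_rtd_theme.get_html_theme_path())
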
8093c239128b1e8f607054c99eca3934da04a31e
|
comics/comics/dieselsweetiesweb.py
|
comics/comics/dieselsweetiesweb.py
|
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_date = "2000-01-01"
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
|
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_days = 155
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
|
Correct history capability for "Diesel Sweeties (web)"
|
Correct history capability for "Diesel Sweeties (web)"
|
Python
|
agpl-3.0
|
jodal/comics,jodal/comics,datagutten/comics,jodal/comics,jodal/comics,datagutten/comics,datagutten/comics,datagutten/comics
|
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_date = "2000-01-01"
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
Correct history capability for "Diesel Sweeties (web)"
|
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_days = 155
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
|
<commit_before>from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_date = "2000-01-01"
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
<commit_msg>Correct history capability for "Diesel Sweeties (web)"<commit_after>
|
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_days = 155
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
|
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_date = "2000-01-01"
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
Correct history capability for "Diesel Sweeties (web)"from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_days = 155
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
|
<commit_before>from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_date = "2000-01-01"
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
<commit_msg>Correct history capability for "Diesel Sweeties (web)"<commit_after>from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Diesel Sweeties (web)"
language = "en"
url = "http://www.dieselsweeties.com/"
start_date = "2000-01-01"
rights = "Richard Stevens"
class Crawler(CrawlerBase):
history_capable_days = 155
schedule = "Mo,We,Fr"
time_zone = "US/Eastern"
def crawl(self, pub_date):
feed = self.parse_feed("http://www.dieselsweeties.com/ds-unifeed.xml")
for entry in feed.for_date(pub_date):
if not hasattr(entry, "summary"):
continue
url = entry.summary.src('img[src*="/strips666/"]')
title = entry.title
text = entry.summary.alt('img[src*="/strips666/"]')
return CrawlerImage(url, title, text)
|
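An illustration of the crawl window implied by history_capable_days = 155; the real cut-off logic lives in comics' crawler base class, so this is only a sketch of the arithmetic.

import datetime

history_capable_days = 155
today = datetime.date.today()
oldest_crawlable = today - datetime.timedelta(days=history_capable_days)
print('strips published on or after', oldest_crawlable, 'can be crawled')
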
5d61b4904057acbe235b74fc1122d09aa365bdeb
|
edx_data_research/monitor/monitor_tracking.py
|
edx_data_research/monitor/monitor_tracking.py
|
import sys
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class TrackingEventHandler(FileSystemEventHandler):
def on_created(self, event):
pass
def on_moved(self, event):
pass
if __name__ == "__main__":
if len(sys.argv) > 1:
args = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
import sys
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class TrackingLogHandler(PatternMatchingEventHandler):
def on_created(self, event):
print event.__repr__()
print event.event_type, event.is_directory, event.src_path
if __name__ == "__main__":
if len(sys.argv) > 1:
path = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingLogHandler(['*.log'], ['*.log-errors'],
case_sensitive=True)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
Define handler for tracking log files
|
Define handler for tracking log files
|
Python
|
mit
|
McGillX/edx_data_research,McGillX/edx_data_research,McGillX/edx_data_research
|
import sys
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class TrackingEventHandler(FileSystemEventHandler):
def on_created(self, event):
pass
def on_moved(self, event):
pass
if __name__ == "__main__":
if len(sys.argv) > 1:
args = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
Define handler for tracking log files
|
import sys
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class TrackingLogHandler(PatternMatchingEventHandler):
def on_created(self, event):
print event.__repr__()
print event.event_type, event.is_directory, event.src_path
if __name__ == "__main__":
if len(sys.argv) > 1:
path = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingLogHandler(['*.log'], ['*.log-errors'],
case_sensitive=True)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
<commit_before>import sys
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class TrackingEventHandler(FileSystemEventHandler):
def on_created(self, event):
pass
def on_moved(self, event):
pass
if __name__ == "__main__":
if len(sys.argv) > 1:
args = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
<commit_msg>Define handler for tracking log files<commit_after>
|
import sys
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class TrackingLogHandler(PatternMatchingEventHandler):
def on_created(self, event):
print event.__repr__()
print event.event_type, event.is_directory, event.src_path
if __name__ == "__main__":
if len(sys.argv) > 1:
path = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingLogHandler(['*.log'], ['*.log-errors'],
case_sensitive=True)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
import sys
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class TrackingEventHandler(FileSystemEventHandler):
def on_created(self, event):
pass
def on_moved(self, event):
pass
if __name__ == "__main__":
if len(sys.argv) > 1:
args = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
Define handler for tracking log files
import sys
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class TrackingLogHandler(PatternMatchingEventHandler):
def on_created(self, event):
print event.__repr__()
print event.event_type, event.is_directory, event.src_path
if __name__ == "__main__":
if len(sys.argv) > 1:
path = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingLogHandler(['*.log'], ['*.log-errors'],
case_sensitive=True)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
<commit_before>import sys
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class TrackingEventHandler(FileSystemEventHandler):
def on_created(self, event):
pass
def on_moved(self, event):
pass
if __name__ == "__main__":
if len(sys.argv) > 1:
args = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
<commit_msg>Define handler for tracking log files<commit_after>import sys
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class TrackingLogHandler(PatternMatchingEventHandler):
def on_created(self, event):
print event.__repr__()
print event.event_type, event.is_directory, event.src_path
if __name__ == "__main__":
if len(sys.argv) > 1:
path = sys.argv[1]
else:
raise ValueError('Missing path to directory to monitor!!!')
event_handler = TrackingLogHandler(['*.log'], ['*.log-errors'],
case_sensitive=True)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
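A standalone check of the pattern rules the handler above is constructed with (requires the watchdog package; no observer is started here):

from watchdog.events import PatternMatchingEventHandler

handler = PatternMatchingEventHandler(['*.log'], ['*.log-errors'],
                                      case_sensitive=True)
# Files matching *.log are dispatched; *.log-errors files are ignored.
print(handler.patterns)         # ['*.log']
print(handler.ignore_patterns)  # ['*.log-errors']
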
7a073da50676864506c3a5de781d3c83530169e8
|
fbmsgbot/bot.py
|
fbmsgbot/bot.py
|
from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message, completion):
self.client.submit_request(
'/me/thread_settings',
'POST',
message.to_json())
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
|
import json
from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message):
greeting = {
'setting_type': 'greeting',
'greeting': {
'text': message
}
}
data = json.dumps(greeting)
response, error = self.client.submit_request(
'/me/thread_settings',
'POST',
data)
return response, error
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
|
Fix remove message object from greeting
|
Fix remove message object from greeting
|
Python
|
mit
|
ben-cunningham/python-messenger-bot,ben-cunningham/pybot
|
from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message, completion):
self.client.submit_request(
'/me/thread_settings',
'POST',
message.to_json())
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
Fix remove message object from greeting
|
import json
from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message):
greeting = {
'setting_type': 'greeting',
'greeting': {
'text': message
}
}
data = json.dumps(greeting)
response, error = self.client.submit_request(
'/me/thread_settings',
'POST',
data)
return response, error
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
|
<commit_before>from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message, completion):
self.client.submit_request(
'/me/thread_settings',
'POST',
message.to_json())
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
<commit_msg>Fix remove message object from greeting<commit_after>
|
import json
from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message):
greeting = {
'setting_type': 'greeting',
'greeting': {
'text': message
}
}
data = json.dumps(greeting)
response, error = self.client.submit_request(
'/me/thread_settings',
'POST',
data)
return response, error
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
|
from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message, completion):
self.client.submit_request(
'/me/thread_settings',
'POST',
message.to_json())
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
Fix remove message object from greeting
import json
from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message):
greeting = {
'setting_type': 'greeting',
'greeting': {
'text': message
}
}
data = json.dumps(greeting)
response, error = self.client.submit_request(
'/me/thread_settings',
'POST',
data)
return response, error
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
|
<commit_before>from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message, completion):
self.client.submit_request(
'/me/thread_settings',
'POST',
message.to_json())
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
<commit_msg>Fix remove message object from greeting<commit_after>import json
from http_client import HttpClient
from models.message import ReceivedMessage
class Bot():
"""
@brief Facebook messenger bot
"""
def __init__(self, token):
self.api_token = token
self.client = HttpClient(token)
def send_message(self, message):
response, error = self.client.submit_request(
'/me/messages',
'POST',
message.to_json())
if error is not None:
print 'Error Encountered! Could not send message\n'
print 'Message: %s' % error
return response, error
def set_welcome(self, message):
greeting = {
'setting_type': 'greeting',
'greeting': {
'text': message
}
}
data = json.dumps(greeting)
response, error = self.client.submit_request(
'/me/thread_settings',
'POST',
data)
return response, error
def messages_for_request(self, request):
"""
Handle incoming requests from the webhook
"""
entries = request.json['entry']
messages = []
for entry in entries:
message = {}
for msg in entry['messaging']:
if msg.get('message') and msg['message'].get('text'):
messages.append(ReceivedMessage(msg))
return messages
def message_from_reciept(receipt):
"""
Handle receipts
"""
raise NotImplementedError
|
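A hypothetical call of the reworked set_welcome; the page access token and greeting text are placeholders, and the bot package must be importable from this repository.

from fbmsgbot.bot import Bot

bot = Bot('PAGE_ACCESS_TOKEN')  # placeholder token
response, error = bot.set_welcome('Hi! Ask me anything to get started.')
assert error is None, error     # greeting text is now wrapped in thread_settings
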
8b3d73ce9bbdcf39e7babd5637fcff9d1ad1dbf9
|
smartcard/Synchronization.py
|
smartcard/Synchronization.py
|
"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
klass.__dict__[name] = synchronized(val)
class Synchronization:
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
|
"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
setattr(klass, name, synchronized(val))
class Synchronization(object):
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
|
Use setattr() instead of a direct access to __dict__
|
Use setattr() instead of a direct access to __dict__
Closes Feature Request 3110077 "new style classes"
https://sourceforge.net/tracker/?func=detail&aid=3110077&group_id=196342&atid=957075
|
Python
|
lgpl-2.1
|
moreati/pyscard,moreati/pyscard,LudovicRousseau/pyscard,moreati/pyscard,LudovicRousseau/pyscard,LudovicRousseau/pyscard
|
"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
klass.__dict__[name] = synchronized(val)
class Synchronization:
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
Use setattr() instead of a direct access to __dict__
Closes Feature Request 3110077 "new style classes"
https://sourceforge.net/tracker/?func=detail&aid=3110077&group_id=196342&atid=957075
|
"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
setattr(klass, name, synchronized(val))
class Synchronization(object):
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
|
<commit_before>"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
klass.__dict__[name] = synchronized(val)
class Synchronization:
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
<commit_msg>Use setattr() instead of a direct access to __dict__
Closes Feature Request 3110077 "new style classes"
https://sourceforge.net/tracker/?func=detail&aid=3110077&group_id=196342&atid=957075<commit_after>
|
"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
setattr(klass, name, synchronized(val))
class Synchronization(object):
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
|
"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
klass.__dict__[name] = synchronized(val)
class Synchronization:
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
Use setattr() instead of a direct access to __dict__
Closes Feature Request 3110077 "new style classes"
https://sourceforge.net/tracker/?func=detail&aid=3110077&group_id=196342&atid=957075"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
setattr(klass, name, synchronized(val))
class Synchronization(object):
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
|
<commit_before>"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
klass.__dict__[name] = synchronized(val)
class Synchronization:
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
<commit_msg>Use setattr() instead of a direct access to __dict__
Closes Feature Request 3110077 "new style classes"
https://sourceforge.net/tracker/?func=detail&aid=3110077&group_id=196342&atid=957075<commit_after>"""
from Thinking in Python, Bruce Eckel
http://mindview.net/Books/TIPython
Simple emulation of Java's 'synchronized'
keyword, from Peter Norvig.
"""
from threading import RLock
def synchronized(method):
def f(*args):
self = args[0]
self.mutex.acquire()
# print method.__name__, 'acquired'
try:
return apply(method, args)
finally:
self.mutex.release()
# print method.__name__, 'released'
return f
def synchronize(klass, names=None):
"""Synchronize methods in the given class.
Only synchronize the methods whose names are
given, or all methods if names=None."""
if type(names) == type(''):
names = names.split()
for (name, val) in klass.__dict__.items():
if callable(val) and name != '__init__' and \
(names == None or name in names):
# print "synchronizing", name
setattr(klass, name, synchronized(val))
class Synchronization(object):
# You can create your own self.mutex, or inherit from this class:
def __init__(self):
self.mutex = RLock()
|
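A minimal sketch of the behaviour behind the commit above: on new-style classes the class __dict__ is exposed as a read-only proxy ('dictproxy' in Python 2, 'mappingproxy' in Python 3), so item assignment raises TypeError, while setattr() works for both old- and new-style classes. The Demo class and wrap() helper below are illustrative only, not part of pyscard.
class Demo(object):  # new-style class
    def method(self):
        return 42

def wrap(fn):
    # Stand-in for synchronized(): wraps a method without changing its result.
    def wrapper(*args):
        return fn(*args)
    return wrapper

# Direct item assignment fails on new-style classes:
#     Demo.__dict__['method'] = wrap(Demo.__dict__['method'])
#     TypeError: 'mappingproxy' object does not support item assignment
setattr(Demo, 'method', wrap(Demo.__dict__['method']))  # works for old- and new-style classes
assert Demo().method() == 42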
1b6fd254472db859960a094454f1229fabee1ec4
|
back-end/BAA/settings/prod.py
|
back-end/BAA/settings/prod.py
|
from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'baattendance@baa.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
|
from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'ian@ianluo.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
|
Use my own email as the stuff
|
Use my own email as the stuff
|
Python
|
mit
|
jumbocodespring2017/bostonathleticsassociation,jumbocodespring2017/bostonathleticsassociation,jumbocodespring2017/bostonathleticsassociation
|
from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'baattendance@baa.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
Use my own email as the stuff
|
from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'ian@ianluo.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
|
<commit_before>from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'baattendance@baa.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
<commit_msg>Use my own email as the stuff<commit_after>
|
from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'ian@ianluo.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
|
from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'baattendance@baa.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
Use my own email as the stufffrom .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'ian@ianluo.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
|
<commit_before>from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'baattendance@baa.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
<commit_msg>Use my own email as the stuff<commit_after>from .common import *
import dj_database_url
# Settings for production environment
DEBUG = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['floating-castle-71814.herokuapp.com'] # apparently you need to have this
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
POSTMARK_SENDER = 'ian@ianluo.com'
POSTMARK_TEST_MODE = True # We can use this to just to see the json
POSTMARK_TRACK_OPENS = False
|
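The commit above hard-codes a personal address as POSTMARK_SENDER. A hedged alternative sketch, not what the commit actually does: read the sender from the environment the same way POSTMARK_API_KEY already is, keeping the literal only as a fallback. The POSTMARK_SENDER_EMAIL variable name is an assumption.
import os

# Sketch only; POSTMARK_SENDER_EMAIL is a made-up environment variable name.
POSTMARK_SENDER = os.getenv('POSTMARK_SENDER_EMAIL', 'ian@ianluo.com')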
c5f153ce69819acdc8f83704daa919fb0cc0b02b
|
bookmarks/default_settings.py
|
bookmarks/default_settings.py
|
import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
|
import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
TEST_DATABASE_NAME = 'bookmarks_test'
|
Add default test database name to default settings
|
Add default test database name to default settings
|
Python
|
apache-2.0
|
byanofsky/bookmarks,byanofsky/bookmarks,byanofsky/bookmarks
|
import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
Add default test database name to default settings
|
import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
TEST_DATABASE_NAME = 'bookmarks_test'
|
<commit_before>import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
<commit_msg>Add default test database name to default settings<commit_after>
|
import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
TEST_DATABASE_NAME = 'bookmarks_test'
|
import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
Add default test database name to default settingsimport pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
TEST_DATABASE_NAME = 'bookmarks_test'
|
<commit_before>import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
<commit_msg>Add default test database name to default settings<commit_after>import pkg_resources # part of setuptools
USER_AGENT_NAME = 'bookmarks'
VERSION_NUMBER = pkg_resources.require('bookmarks')[0].version
SECRET_KEY = 'development key'
DATABASE_USERNAME = 'bookmarks'
DATABASE_PASSWORD = ''
DATABASE_HOST = 'localhost'
DATABASE_NAME = 'bookmarks'
TEST_DATABASE_NAME = 'bookmarks_test'
|
97696fafb6ce556781c02a130ae5f0e610c9bf45
|
test/selenium/src/lib/file_ops.py
|
test/selenium/src/lib/file_ops.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import os
import logging
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
for file_name in os.listdir(path):
os.remove(path + os.sep + file_name)
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import logging
import os
import shutil
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
"""Add numeric postfix for file."""
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
"""Remove all files and sub-dir in provided path."""
shutil.rmtree(path)
|
Delete sub folders in log directory
|
Delete sub folders in log directory
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,plamut/ggrc-core
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import os
import logging
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
for file_name in os.listdir(path):
os.remove(path + os.sep + file_name)
Delete sub folders in log directory
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import logging
import os
import shutil
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
"""Add numeric postfix for file."""
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
"""Remove all files and sub-dir in provided path."""
shutil.rmtree(path)
|
<commit_before># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import os
import logging
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
for file_name in os.listdir(path):
os.remove(path + os.sep + file_name)
<commit_msg>Delete sub folders in log directory<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import logging
import os
import shutil
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
"""Add numeric postfix for file."""
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
"""Remove all files and sub-dir in provided path."""
shutil.rmtree(path)
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import os
import logging
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
for file_name in os.listdir(path):
os.remove(path + os.sep + file_name)
Delete sub folders in log directory# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import logging
import os
import shutil
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
"""Add numeric postfix for file."""
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
"""Remove all files and sub-dir in provided path."""
shutil.rmtree(path)
|
<commit_before># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import os
import logging
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
for file_name in os.listdir(path):
os.remove(path + os.sep + file_name)
<commit_msg>Delete sub folders in log directory<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import logging
import os
import shutil
logger = logging.getLogger(__name__)
def create_directory(path):
"""
Creates a directory if it doesn't already exist.
"""
# Check if path is a file_path or a dir_path. Dir path is a string that
# ends with os.sep
if path[-1] != os.sep:
path, file_name = os.path.split(path)
if not os.path.exists(path):
logger.info("Creating directory: %s", path)
os.makedirs(path)
def get_unique_postfix(file_path, extension):
"""Add numeric postfix for file."""
postfix = 0
new_path = file_path + str(postfix) + extension
while os.path.isfile(new_path):
postfix += 1
new_path = file_path + str(postfix) + extension
return new_path
def delete_directory_contents(path):
"""Remove all files and sub-dir in provided path."""
shutil.rmtree(path)
|
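A small runnable sketch of the behavioural difference behind the commit above: the old loop removed only plain files and would fail on sub-directories (os.remove cannot delete them), while shutil.rmtree deletes files, sub-directories and the top-level directory itself, so a caller that still needs the directory must recreate it. The paths below are temporary stand-ins.
import os
import shutil
import tempfile

log_dir = tempfile.mkdtemp()                       # stand-in for the real log directory
os.makedirs(os.path.join(log_dir, 'sub'))          # a sub-folder the old loop could not handle
open(os.path.join(log_dir, 'run.log'), 'w').close()

shutil.rmtree(log_dir)                             # removes files, sub-dirs and log_dir itself
assert not os.path.exists(log_dir)
os.makedirs(log_dir)                               # recreate it if later code expects it to exist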
aa4174fa44994a30cae5be9a59eee6dd55ece201
|
tests/acceptance/response_test.py
|
tests/acceptance/response_test.py
|
from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
|
from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_swagger_spec_validation': False,
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
|
Test needs skipping if on p3k
|
Test needs skipping if on p3k
|
Python
|
bsd-3-clause
|
brianthelion/pyramid_swagger,prat0318/pyramid_swagger,striglia/pyramid_swagger,analogue/pyramid_swagger,striglia/pyramid_swagger
|
from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
Test needs skipping if on p3k
|
from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_swagger_spec_validation': False,
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
|
<commit_before>from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
<commit_msg>Test needs skipping if on p3k<commit_after>
|
from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_swagger_spec_validation': False,
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
|
from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
Test needs skipping if on p3kfrom .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_swagger_spec_validation': False,
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
|
<commit_before>from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
<commit_msg>Test needs skipping if on p3k<commit_after>from .request_test import test_app
def test_200_for_normal_response_validation():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.enable_swagger_spec_validation': False,
'pyramid_swagger.enable_response_validation': True,
}
test_app(settings).post_json(
'/sample',
{'foo': 'test', 'bar': 'test'},
status=200
)
def test_200_skip_validation_with_wrong_response():
settings = {
'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
'pyramid_swagger.skip_validation': '/(sample)\\b',
}
test_app(settings).get(
'/sample/path_arg1/resource',
params={'required_arg': 'test'},
status=200
)
|
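The commit message above mentions skipping on Python 3, while the diff itself only switches off swagger spec validation for that test. A hedged sketch of how such a skip is commonly expressed, assuming the suite runs under pytest (the record does not say which test runner is used); the reason text is an assumption.
import sys

import pytest

@pytest.mark.skipif(sys.version_info >= (3, 0),
                    reason='swagger spec validation assumed unavailable on Python 3')
def test_200_for_normal_response_validation():
    # Body omitted; see the test above for the real request.
    pass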
a777ace1d29843a2444d3b73cafea9a808945bc9
|
django_gears/templatetags/gears.py
|
django_gears/templatetags/gears.py
|
from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'css_asset_tag' tag takes one argument:"
" the logical path to the public asset")
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
raise TemplateSyntaxError("Second (optional) argument to"
" 'css_asset_tag' tag must be 'debug'")
logical_path = parser.compile_filter(bits[1])
return CSSAssetTagNode(logical_path, debug)
|
from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
msg = '%r tag takes one argument: the logical path to the public asset'
raise TemplateSyntaxError(msg % bits[0])
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
msg = "Second (optional) argument to %r tag must be 'debug'"
raise TemplateSyntaxError(msg % bits[0])
logical_path = parser.compile_filter(bits[1])
return cls(logical_path, debug)
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
return CSSAssetTagNode.handle_token(parser, token)
|
Move token handling from tag function to CSSAssetTagNode
|
Move token handling from tag function to CSSAssetTagNode
|
Python
|
isc
|
wiserthanever/django-gears,juliomenendez/django-gears,wiserthanever/django-gears,juliomenendez/django-gears,juliomenendez/django-gears,gears/django-gears,wiserthanever/django-gears,gears/django-gears,juliomenendez/django-gears,gears/django-gears,wiserthanever/django-gears
|
from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'css_asset_tag' tag takes one argument:"
" the logical path to the public asset")
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
raise TemplateSyntaxError("Second (optional) argument to"
" 'css_asset_tag' tag must be 'debug'")
logical_path = parser.compile_filter(bits[1])
return CSSAssetTagNode(logical_path, debug)
Move token handling from tag function to CSSAssetTagNode
|
from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
msg = '%r tag takes one argument: the logical path to the public asset'
raise TemplateSyntaxError(msg % bits[0])
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
msg = "Second (optional) argument to %r tag must be 'debug'"
raise TemplateSyntaxError(msg % bits[0])
logical_path = parser.compile_filter(bits[1])
return cls(logical_path, debug)
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
return CSSAssetTagNode.handle_token(parser, token)
|
<commit_before>from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'css_asset_tag' tag takes one argument:"
" the logical path to the public asset")
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
raise TemplateSyntaxError("Second (optional) argument to"
" 'css_asset_tag' tag must be 'debug'")
logical_path = parser.compile_filter(bits[1])
return CSSAssetTagNode(logical_path, debug)
<commit_msg>Move token handling from tag function to CSSAssetTagNode<commit_after>
|
from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
msg = '%r tag takes one argument: the logical path to the public asset'
raise TemplateSyntaxError(msg % bits[0])
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
msg = "Second (optional) argument to %r tag must be 'debug'"
raise TemplateSyntaxError(msg % bits[0])
logical_path = parser.compile_filter(bits[1])
return cls(logical_path, debug)
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
return CSSAssetTagNode.handle_token(parser, token)
|
from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'css_asset_tag' tag takes one argument:"
" the logical path to the public asset")
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
raise TemplateSyntaxError("Second (optional) argument to"
" 'css_asset_tag' tag must be 'debug'")
logical_path = parser.compile_filter(bits[1])
return CSSAssetTagNode(logical_path, debug)
Move token handling from tag function to CSSAssetTagNodefrom __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
msg = '%r tag takes one argument: the logical path to the public asset'
raise TemplateSyntaxError(msg % bits[0])
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
msg = "Second (optional) argument to %r tag must be 'debug'"
raise TemplateSyntaxError(msg % bits[0])
logical_path = parser.compile_filter(bits[1])
return cls(logical_path, debug)
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
return CSSAssetTagNode.handle_token(parser, token)
|
<commit_before>from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'css_asset_tag' tag takes one argument:"
" the logical path to the public asset")
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
raise TemplateSyntaxError("Second (optional) argument to"
" 'css_asset_tag' tag must be 'debug'")
logical_path = parser.compile_filter(bits[1])
return CSSAssetTagNode(logical_path, debug)
<commit_msg>Move token handling from tag function to CSSAssetTagNode<commit_after>from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from gears.assets import build_asset
from ..settings import environment, GEARS_URL, GEARS_DEBUG
register = Library()
class CSSAssetTagNode(Node):
template = u'<link rel="stylesheet" href="%s%%s">' % GEARS_URL
def __init__(self, logical_path, debug):
self.logical_path = logical_path
self.debug = debug
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) not in (2, 3):
msg = '%r tag takes one argument: the logical path to the public asset'
raise TemplateSyntaxError(msg % bits[0])
debug = (len(bits) == 3)
if debug and bits[2] != 'debug':
msg = "Second (optional) argument to %r tag must be 'debug'"
raise TemplateSyntaxError(msg % bits[0])
logical_path = parser.compile_filter(bits[1])
return cls(logical_path, debug)
def render(self, context):
logical_path = self.logical_path.resolve(context)
if self.debug or GEARS_DEBUG:
asset = build_asset(environment, logical_path)
paths = (('%s?body=1' % r.attributes.logical_path) for r in asset.requirements)
else:
paths = (logical_path,)
return '\n'.join((self.template % path) for path in paths)
@register.tag
def css_asset_tag(parser, token):
return CSSAssetTagNode.handle_token(parser, token)
|
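One effect of the handle_token classmethod introduced above is that it returns cls(...), so a subclass can reuse the same token parsing under another tag name. A hypothetical sketch building on the CSSAssetTagNode defined in this record (django-gears ships no such JS tag here):
class JSAssetTagNode(CSSAssetTagNode):
    # Hypothetical subclass: inherits handle_token() unchanged and only
    # swaps the rendered markup.
    template = u'<script src="%s%%s"></script>' % GEARS_URL

@register.tag
def js_asset_tag(parser, token):
    return JSAssetTagNode.handle_token(parser, token)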
08812c8507fac2c57bd143dd7aad4c54d5c0aa75
|
panoptes_client/user.py
|
panoptes_client/user.py
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
if not email:
for user in super(User, cls).where(**kwargs):
yield user
return
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
login = kwargs.get('login')
if email and login:
raise ValueError(
'Queries are supported on at most ONE of email and login'
)
if email:
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
elif login:
if not isiterable(login):
login = [login]
for batch in split(login, 50):
kwargs['login'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
else:
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
|
Allow batched User lookups by login name
|
Allow batched User lookups by login name
|
Python
|
apache-2.0
|
zooniverse/panoptes-python-client
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
if not email:
for user in super(User, cls).where(**kwargs):
yield user
return
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
Allow batched User lookups by login name
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
login = kwargs.get('login')
if email and login:
raise ValueError(
'Queries are supported on at most ONE of email and login'
)
if email:
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
elif login:
if not isiterable(login):
login = [login]
for batch in split(login, 50):
kwargs['login'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
else:
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
|
<commit_before>from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
if not email:
for user in super(User, cls).where(**kwargs):
yield user
return
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
<commit_msg>Allow batched User lookups by login name<commit_after>
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
login = kwargs.get('login')
if email and login:
raise ValueError(
'Queries are supported on at most ONE of email and login'
)
if email:
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
elif login:
if not isiterable(login):
login = [login]
for batch in split(login, 50):
kwargs['login'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
else:
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
|
from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
if not email:
for user in super(User, cls).where(**kwargs):
yield user
return
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
Allow batched User lookups by login namefrom __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
login = kwargs.get('login')
if email and login:
raise ValueError(
'Queries are supported on at most ONE of email and login'
)
if email:
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
elif login:
if not isiterable(login):
login = [login]
for batch in split(login, 50):
kwargs['login'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
else:
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
|
<commit_before>from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
if not email:
for user in super(User, cls).where(**kwargs):
yield user
return
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
<commit_msg>Allow batched User lookups by login name<commit_after>from __future__ import absolute_import, division, print_function
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.utils import isiterable, split
class User(PanoptesObject):
_api_slug = 'users'
_link_slug = 'users'
_edit_attributes = (
'valid_email',
)
@classmethod
def where(cls, **kwargs):
email = kwargs.get('email')
login = kwargs.get('login')
if email and login:
raise ValueError(
'Queries are supported on at most ONE of email and login'
)
if email:
if not isiterable(email):
email = [email]
for batch in split(email, 50):
kwargs['email'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
elif login:
if not isiterable(login):
login = [login]
for batch in split(login, 50):
kwargs['login'] = ",".join(batch)
for user in super(User, cls).where(**kwargs):
yield user
else:
for user in super(User, cls).where(**kwargs):
yield user
@property
def avatar(self):
"""
A dict containing metadata about the user's avatar.
"""
return User.http_get('{}/avatar'.format(self.id))[0]
LinkResolver.register(User)
LinkResolver.register(User, 'owner')
|
a0099789f404a06a64884ff7ce7b6bfd6597846a
|
waterbutler/server/api/v1/provider/create.py
|
waterbutler/server/api/v1/provider/create.py
|
import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.name = self.get_query_argument('name') # TODO What does this do?
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
self.path = os.path.join(self.path, self.name)
if self.kind == 'folder':
self.path += '/'
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
|
import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
if self.path.endswith('/'):
name = self.get_query_argument('name') # TODO What does this do?
self.path = os.path.join(self.path, name)
if self.kind == 'folder':
self.path += '/'
elif self.kind == 'folder':
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
|
Allow uploading directly to a file
|
Allow uploading directly to a file
|
Python
|
apache-2.0
|
Johnetordoff/waterbutler,rafaeldelucena/waterbutler,chrisseto/waterbutler,CenterForOpenScience/waterbutler,kwierman/waterbutler,RCOSDP/waterbutler,felliott/waterbutler,rdhyee/waterbutler,TomBaxter/waterbutler,icereval/waterbutler,cosenal/waterbutler
|
import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.name = self.get_query_argument('name') # TODO What does this do?
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
self.path = os.path.join(self.path, self.name)
if self.kind == 'folder':
self.path += '/'
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
Allow uploading directly to a file
|
import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
if self.path.endswith('/'):
name = self.get_query_argument('name') # TODO What does this do?
self.path = os.path.join(self.path, name)
if self.kind == 'folder':
self.path += '/'
elif self.kind == 'folder':
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
|
<commit_before>import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.name = self.get_query_argument('name') # TODO What does this do?
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
self.path = os.path.join(self.path, self.name)
if self.kind == 'folder':
self.path += '/'
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
<commit_msg>Allow uploading directly to a file<commit_after>
|
import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
if self.path.endswith('/'):
name = self.get_query_argument('name') # TODO What does this do?
self.path = os.path.join(self.path, name)
if self.kind == 'folder':
self.path += '/'
elif self.kind == 'folder':
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
|
import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.name = self.get_query_argument('name') # TODO What does this do?
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
self.path = os.path.join(self.path, self.name)
if self.kind == 'folder':
self.path += '/'
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
Allow uploading directly to a fileimport os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
if self.path.endswith('/'):
name = self.get_query_argument('name') # TODO What does this do?
self.path = os.path.join(self.path, name)
if self.kind == 'folder':
self.path += '/'
elif self.kind == 'folder':
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
|
<commit_before>import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.name = self.get_query_argument('name') # TODO What does this do?
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
self.path = os.path.join(self.path, self.name)
if self.kind == 'folder':
self.path += '/'
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
<commit_msg>Allow uploading directly to a file<commit_after>import os
import asyncio
from tornado.web import HTTPError
class CreateMixin:
def validate_put(self):
self.kind = self.get_query_argument('kind', default='file')
if self.kind not in ('file', 'folder'):
raise HTTPError(400)
if self.path.endswith('/'):
name = self.get_query_argument('name') # TODO What does this do?
self.path = os.path.join(self.path, name)
if self.kind == 'folder':
self.path += '/'
elif self.kind == 'folder':
raise HTTPError(400)
length = self.request.headers.get('Content-Length')
if length is None and self.kind == 'file':
raise HTTPError(400)
try:
if int(length) > 0 and self.kind == 'folder':
raise HTTPError(400)
except ValueError:
raise HTTPError(400)
@asyncio.coroutine
def create_folder(self):
metadata = yield from self.provider.create_folder(self.path)
self.set_status(201)
self.write(metadata.serialized())
@asyncio.coroutine
def upload_file(self):
self.writer.write_eof()
metadata, created = yield from self.uploader
self.writer.close()
self.wsock.close()
if created:
self.set_status(201)
self.write(metadata.serialized())
|
26fc7b48d2d019b3c83db0d518d5cd99204f7982
|
linux/keyman-config/keyman_config/__init__.py
|
linux/keyman-config/keyman_config/__init__.py
|
from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://staging-keyman-com.azurewebsites.net'
KeymanApiUrl = 'https://staging-api-keyman-com.azurewebsites.net'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
|
from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://keyman-staging.com'
KeymanApiUrl = 'https://api.keyman-staging.com'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
|
Use new staging site names
|
feat(linux): Use new staging site names
|
Python
|
apache-2.0
|
tavultesoft/keymanweb,tavultesoft/keymanweb
|
from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://staging-keyman-com.azurewebsites.net'
KeymanApiUrl = 'https://staging-api-keyman-com.azurewebsites.net'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
feat(linux): Use new staging site names
|
from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://keyman-staging.com'
KeymanApiUrl = 'https://api.keyman-staging.com'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
|
<commit_before>from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://staging-keyman-com.azurewebsites.net'
KeymanApiUrl = 'https://staging-api-keyman-com.azurewebsites.net'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
<commit_msg>feat(linux): Use new staging site names<commit_after>
|
from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://keyman-staging.com'
KeymanApiUrl = 'https://api.keyman-staging.com'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
|
from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://staging-keyman-com.azurewebsites.net'
KeymanApiUrl = 'https://staging-api-keyman-com.azurewebsites.net'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
feat(linux): Use new staging site namesfrom .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://keyman-staging.com'
KeymanApiUrl = 'https://api.keyman-staging.com'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
|
<commit_before>from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://staging-keyman-com.azurewebsites.net'
KeymanApiUrl = 'https://staging-api-keyman-com.azurewebsites.net'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
<commit_msg>feat(linux): Use new staging site names<commit_after>from .version import __version__
from .version import __majorversion__
from .version import __releaseversion__
from .version import __tier__
if __tier__ == 'alpha':
# Alpha versions will work against the staging server so that they
# can access new APIs etc that will only be available there. The staging
# servers have resource constraints but should be okay for limited use.
KeymanComUrl = 'https://keyman-staging.com'
KeymanApiUrl = 'https://api.keyman-staging.com'
else:
KeymanComUrl = 'https://keyman.com'
KeymanApiUrl = 'https://api.keyman.com'
|
c1acf5904ba8c48bb58e104195380b0bbce1ed8e
|
rest_framework_captcha/decorators.py
|
rest_framework_captcha/decorators.py
|
from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
|
from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"error": True, "detail": "invalid_captcha_headers", "message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"error": True, "detail": "invalid_captcha", "message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
|
Add error and detail field to captcha errors
|
Add error and detail field to captcha errors
|
Python
|
mit
|
leonardoarroyo/rest-framework-captcha
|
from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
Add error and detail field to captcha errors
|
from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"error": True, "detail": "invalid_captcha_headers", "message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"error": True, "detail": "invalid_captcha", "message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
|
<commit_before>from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
<commit_msg>Add error and detail field to captcha errors<commit_after>
|
from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"error": True, "detail": "invalid_captcha_headers", "message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"error": True, "detail": "invalid_captcha", "message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
|
from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
Add error and detail field to captcha errorsfrom functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"error": True, "detail": "invalid_captcha_headers", "message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"error": True, "detail": "invalid_captcha", "message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
|
<commit_before>from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
<commit_msg>Add error and detail field to captcha errors<commit_after>from functools import wraps
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework_captcha.models import Captcha
from rest_framework_captcha.helpers import get_settings, get_request_from_args
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
def protected_view(func):
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_from_args(*args)
uuid = request.META.get("HTTP_X_CAPTCHA_UUID", None)
secret = request.META.get("HTTP_X_CAPTCHA_SECRET", None)
time_limit = get_settings().get("EXPIRE_IN", 5*60)
if not uuid or not secret:
return Response({"error": True, "detail": "invalid_captcha_headers", "message": "This view is protected by captcha. You have to set headers X-Captcha-UUID and X-Captcha-Secret with valid values."}, status=400)
try:
captcha = Captcha.objects.get(uuid=uuid, secret__iexact=secret, fresh=True, created_at__gte=timezone.now() - relativedelta(seconds=time_limit))
except (Captcha.DoesNotExist, ValueError):
return Response({"error": True, "detail": "invalid_captcha", "message": "Invalid/expired captcha or incorrect secret."}, status=400)
captcha.fresh = False
captcha.save()
return func(*args, **kwargs)
return wrapper
|
2f9e8ebccfe5057349e8d563b774481a22261f22
|
linter.py
|
linter.py
|
from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
|
from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${args} ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
|
Add `${args}` marker to cmd
|
Add `${args}` marker to cmd
|
Python
|
mit
|
SublimeLinter/SublimeLinter-csslint
|
from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
Add `${args}` marker to cmd
|
from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${args} ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
|
<commit_before>from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
<commit_msg>Add `${args}` marker to cmd<commit_after>
|
from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${args} ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
|
from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
Add `${args}` marker to cmdfrom SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${args} ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
|
<commit_before>from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
<commit_msg>Add `${args}` marker to cmd<commit_after>from SublimeLinter.lint import Linter, util
class CSSLint(Linter):
cmd = 'csslint --format=compact ${args} ${temp_file}'
regex = r'''(?xi)
^.+:\s* # filename
# csslint emits errors that pertain to the code as a whole,
# in which case there is no line/col information, so that
# part is optional.
(?:line\ (?P<line>\d+),\ col\ (?P<col>\d+),\ )?
(?:(?P<error>error)|(?P<warning>warning))\ -\ (?P<message>.*)
'''
word_re = r'^([#\.]?[-\w]+)'
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'css'
defaults = {
'selector': 'source.css - meta.attribute-with-value',
'--errors=,': '',
'--warnings=,': '',
'--ignore=,': ''
}
def split_match(self, match):
"""
Extract and return values from match.
We override this method so that general errors that do not have
a line number can be placed at the beginning of the code.
"""
match, line, col, error, warning, message, near = super().split_match(match)
if line is None and message:
line = 0
col = 0
return match, line, col, error, warning, message, near
|
36b6ea45d16946e0c134496b2417505d332821e3
|
linter.py
|
linter.py
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*?\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
|
Make block comment parsing less greedy.
|
Make block comment parsing less greedy.
|
Python
|
mit
|
SublimeLinter/SublimeLinter-json
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
Make block comment parsing less greedy.
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*?\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
|
<commit_before>#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
<commit_msg>Make block comment parsing less greedy.<commit_after>
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*?\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
|
#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
Make block comment parsing less greedy.#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*?\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
|
<commit_before>#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
<commit_msg>Make block comment parsing less greedy.<commit_after>#
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2013 Aparajita Fishman
#
# License: MIT
#
"""This module exports the JSON plugin linter class."""
import json
import os.path
import re
from SublimeLinter.lint import Linter
class JSON(Linter):
"""Provides an interface to json.loads()."""
syntax = 'json'
cmd = None
regex = r'^(?P<message>.+):\s*line (?P<line>\d+) column (?P<col>\d+)'
def run(self, cmd, code):
"""Attempt to parse code as JSON, return '' if it succeeds, the error message if it fails."""
# Ignore comments in .sublime-settings files.
if os.path.splitext(self.filename)[1] == '.sublime-settings':
code = re.sub(r'\s*//.*', '', code) # Line comments.
code = re.sub(r'\s*/\*.*?\*/', '', code, flags=re.DOTALL) # Block comments.
try:
json.loads(code)
return ''
except ValueError as err:
return str(err)
|
66429bf759c3c6ffcf44db5197b1f09469fdf022
|
website/addons/twofactor/views.py
|
website/addons/twofactor/views.py
|
import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user, user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)
|
import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)
|
Fix missing argument (issue arose from merge)
|
Fix missing argument (issue arose from merge)
|
Python
|
apache-2.0
|
rdhyee/osf.io,alexschiller/osf.io,adlius/osf.io,jolene-esposito/osf.io,brianjgeiger/osf.io,jnayak1/osf.io,caneruguz/osf.io,caneruguz/osf.io,kch8qx/osf.io,DanielSBrown/osf.io,amyshi188/osf.io,caseyrollins/osf.io,doublebits/osf.io,GageGaskins/osf.io,billyhunt/osf.io,himanshuo/osf.io,cldershem/osf.io,himanshuo/osf.io,SSJohns/osf.io,Ghalko/osf.io,erinspace/osf.io,saradbowman/osf.io,leb2dg/osf.io,SSJohns/osf.io,barbour-em/osf.io,monikagrabowska/osf.io,samchrisinger/osf.io,Ghalko/osf.io,haoyuchen1992/osf.io,mluo613/osf.io,revanthkolli/osf.io,zkraime/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,jinluyuan/osf.io,kch8qx/osf.io,lyndsysimon/osf.io,ZobairAlijan/osf.io,kwierman/osf.io,GageGaskins/osf.io,binoculars/osf.io,petermalcolm/osf.io,icereval/osf.io,zachjanicki/osf.io,felliott/osf.io,ZobairAlijan/osf.io,emetsger/osf.io,KAsante95/osf.io,barbour-em/osf.io,zkraime/osf.io,monikagrabowska/osf.io,billyhunt/osf.io,njantrania/osf.io,cslzchen/osf.io,wearpants/osf.io,lamdnhan/osf.io,GaryKriebel/osf.io,lyndsysimon/osf.io,asanfilippo7/osf.io,njantrania/osf.io,alexschiller/osf.io,doublebits/osf.io,hmoco/osf.io,KAsante95/osf.io,barbour-em/osf.io,ticklemepierce/osf.io,chrisseto/osf.io,KAsante95/osf.io,kwierman/osf.io,Johnetordoff/osf.io,GaryKriebel/osf.io,wearpants/osf.io,laurenrevere/osf.io,baylee-d/osf.io,fabianvf/osf.io,fabianvf/osf.io,cosenal/osf.io,bdyetton/prettychart,aaxelb/osf.io,billyhunt/osf.io,revanthkolli/osf.io,HalcyonChimera/osf.io,chrisseto/osf.io,acshi/osf.io,jinluyuan/osf.io,KAsante95/osf.io,GaryKriebel/osf.io,cldershem/osf.io,doublebits/osf.io,dplorimer/osf,brandonPurvis/osf.io,cosenal/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,fabianvf/osf.io,brandonPurvis/osf.io,mluke93/osf.io,jmcarp/osf.io,himanshuo/osf.io,crcresearch/osf.io,mluo613/osf.io,crcresearch/osf.io,brandonPurvis/osf.io,TomHeatwole/osf.io,GageGaskins/osf.io,Ghalko/osf.io,cwisecarver/osf.io,arpitar/osf.io,HarryRybacki/osf.io,acshi/osf.io,samanehsan/osf.io,jeffreyliu3230/osf.io,HarryRybacki/osf.io,kwierman/osf.io,zamattiac/osf.io,ckc6cz/osf.io,TomHeatwole/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,kwierman/osf.io,baylee-d/osf.io,caseyrollins/osf.io,haoyuchen1992/osf.io,laurenrevere/osf.io,sloria/osf.io,samchrisinger/osf.io,haoyuchen1992/osf.io,zkraime/osf.io,TomBaxter/osf.io,amyshi188/osf.io,RomanZWang/osf.io,rdhyee/osf.io,Nesiehr/osf.io,hmoco/osf.io,brandonPurvis/osf.io,ckc6cz/osf.io,felliott/osf.io,ZobairAlijan/osf.io,ckc6cz/osf.io,caseyrygt/osf.io,MerlinZhang/osf.io,reinaH/osf.io,GageGaskins/osf.io,erinspace/osf.io,jmcarp/osf.io,petermalcolm/osf.io,njantrania/osf.io,rdhyee/osf.io,AndrewSallans/osf.io,felliott/osf.io,caneruguz/osf.io,mattclark/osf.io,jmcarp/osf.io,asanfilippo7/osf.io,sbt9uc/osf.io,samanehsan/osf.io,kch8qx/osf.io,caseyrygt/osf.io,TomHeatwole/osf.io,arpitar/osf.io,ticklemepierce/osf.io,haoyuchen1992/osf.io,hmoco/osf.io,zamattiac/osf.io,acshi/osf.io,zachjanicki/osf.io,CenterForOpenScience/osf.io,kch8qx/osf.io,ticklemepierce/osf.io,mfraezz/osf.io,jolene-esposito/osf.io,sbt9uc/osf.io,RomanZWang/osf.io,caseyrollins/osf.io,petermalcolm/osf.io,lamdnhan/osf.io,asanfilippo7/osf.io,SSJohns/osf.io,dplorimer/osf,CenterForOpenScience/osf.io,doublebits/osf.io,cslzchen/osf.io,cosenal/osf.io,emetsger/osf.io,billyhunt/osf.io,chrisseto/osf.io,dplorimer/osf,binoculars/osf.io,sbt9uc/osf.io,caseyrygt/osf.io,DanielSBrown/osf.io,acshi/osf.io,Johnetordoff/osf.io,jeffreyliu3230/osf.io,binoculars/osf.io,lamdnhan/osf.io,pattisdr/osf.io,ZobairAlijan/osf.io,sloria/osf.io,Nesiehr/osf.io,kushG/osf.io,amyshi188/osf.io,barbour-em/osf.io,ku
shG/osf.io,cwisecarver/osf.io,samanehsan/osf.io,jolene-esposito/osf.io,aaxelb/osf.io,ticklemepierce/osf.io,bdyetton/prettychart,jnayak1/osf.io,jolene-esposito/osf.io,arpitar/osf.io,doublebits/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,HarryRybacki/osf.io,zachjanicki/osf.io,abought/osf.io,mluo613/osf.io,acshi/osf.io,reinaH/osf.io,alexschiller/osf.io,samanehsan/osf.io,crcresearch/osf.io,rdhyee/osf.io,himanshuo/osf.io,pattisdr/osf.io,caseyrygt/osf.io,TomBaxter/osf.io,billyhunt/osf.io,MerlinZhang/osf.io,Nesiehr/osf.io,jmcarp/osf.io,arpitar/osf.io,HalcyonChimera/osf.io,Ghalko/osf.io,MerlinZhang/osf.io,felliott/osf.io,mfraezz/osf.io,reinaH/osf.io,abought/osf.io,samchrisinger/osf.io,kch8qx/osf.io,hmoco/osf.io,DanielSBrown/osf.io,CenterForOpenScience/osf.io,jeffreyliu3230/osf.io,wearpants/osf.io,SSJohns/osf.io,monikagrabowska/osf.io,danielneis/osf.io,DanielSBrown/osf.io,caneruguz/osf.io,Nesiehr/osf.io,cosenal/osf.io,abought/osf.io,lyndsysimon/osf.io,cslzchen/osf.io,cwisecarver/osf.io,petermalcolm/osf.io,sloria/osf.io,GaryKriebel/osf.io,icereval/osf.io,zamattiac/osf.io,danielneis/osf.io,lamdnhan/osf.io,monikagrabowska/osf.io,asanfilippo7/osf.io,mluke93/osf.io,pattisdr/osf.io,aaxelb/osf.io,wearpants/osf.io,revanthkolli/osf.io,chennan47/osf.io,TomBaxter/osf.io,laurenrevere/osf.io,RomanZWang/osf.io,dplorimer/osf,KAsante95/osf.io,sbt9uc/osf.io,zamattiac/osf.io,chrisseto/osf.io,zachjanicki/osf.io,ckc6cz/osf.io,amyshi188/osf.io,alexschiller/osf.io,brandonPurvis/osf.io,MerlinZhang/osf.io,jeffreyliu3230/osf.io,kushG/osf.io,kushG/osf.io,jnayak1/osf.io,HarryRybacki/osf.io,mfraezz/osf.io,revanthkolli/osf.io,mattclark/osf.io,bdyetton/prettychart,baylee-d/osf.io,cwisecarver/osf.io,mattclark/osf.io,bdyetton/prettychart,alexschiller/osf.io,adlius/osf.io,brianjgeiger/osf.io,jinluyuan/osf.io,TomHeatwole/osf.io,aaxelb/osf.io,adlius/osf.io,RomanZWang/osf.io,mluke93/osf.io,samchrisinger/osf.io,leb2dg/osf.io,erinspace/osf.io,icereval/osf.io,mluo613/osf.io,cldershem/osf.io,emetsger/osf.io,fabianvf/osf.io,RomanZWang/osf.io,GageGaskins/osf.io,zkraime/osf.io,lyndsysimon/osf.io,cldershem/osf.io,HalcyonChimera/osf.io,danielneis/osf.io,adlius/osf.io,leb2dg/osf.io,mluke93/osf.io,reinaH/osf.io,mfraezz/osf.io,AndrewSallans/osf.io,emetsger/osf.io,saradbowman/osf.io,abought/osf.io,chennan47/osf.io,danielneis/osf.io,njantrania/osf.io,chennan47/osf.io,mluo613/osf.io,jinluyuan/osf.io,CenterForOpenScience/osf.io,jnayak1/osf.io
|
import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user, user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)Fix missing argument (issue arose from merge)
|
import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)
|
<commit_before>import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user, user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)<commit_msg>Fix missing argument (issue arose from merge)<commit_after>
|
import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)
|
import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user, user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)Fix missing argument (issue arose from merge)import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)
|
<commit_before>import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user, user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)<commit_msg>Fix missing argument (issue arose from merge)<commit_after>import httplib as http
import json
from framework import request
from framework.auth import get_current_user
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from website.project.decorators import must_have_addon
@must_be_logged_in
@must_have_addon('twofactor', 'user')
def user_settings(user_addon, *args, **kwargs):
try:
data = json.loads(request.data)
except ValueError:
raise HTTPError(code=http.BAD_REQUEST)
code = data.get('code')
if user_addon.verify_code(code):
user_addon.is_confirmed = True
user_addon.save()
return {}
raise HTTPError(403)
|
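The two-factor record above ends with a view that reads a JSON body, checks the submitted code, and answers with an empty object on success or HTTP 403 on failure. The client-side sketch below only illustrates that contract; the route URL and the session cookie are assumptions, since the record does not name the actual endpoint.

import requests  # hypothetical client; any HTTP client would do

# The URL is a placeholder -- the record does not expose the real route.
resp = requests.post(
    "https://osf.example.org/api/v1/settings/twofactor/",
    json={"code": "123456"},       # the view expects a JSON body with a "code" key
    cookies={"osf": "<session>"},  # @must_be_logged_in implies an authenticated session
)
if resp.status_code == 403:
    print("Verification code rejected")
else:
    print("Two-factor device confirmed")  # the view returns {} on success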
146f6204e58695ca469cec7a79757ce9a730719e
|
contrib/migrateticketmodel.py
|
contrib/migrateticketmodel.py
|
#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import os
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
|
Fix missing import in contrib script added in [2630].
|
Fix missing import in contrib script added in [2630].
|
Python
|
bsd-3-clause
|
pkdevbox/trac,pkdevbox/trac,pkdevbox/trac,pkdevbox/trac
|
#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
Fix missing import in contrib script added in [2630].
|
#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import os
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
|
<commit_before>#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
<commit_msg>Fix missing import in contrib script added in [2630].<commit_after>
|
#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import os
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
Fix missing import in contrib script added in [2630].#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import os
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
|
<commit_before>#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
<commit_msg>Fix missing import in contrib script added in [2630].<commit_after>#!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
import os
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
priority_mapping = {
'highest': 'blocker',
'high': 'critical',
'normal': 'major',
'low': 'minor',
'lowest': 'trivial'
}
def main():
if len(sys.argv) < 2:
print >> sys.stderr, 'usage: %s /path/to/projenv' \
% os.path.basename(sys.argv[0])
sys.exit(2)
env = open_environment(sys.argv[1])
db = env.get_db_cnx()
for oldprio, newprio in priority_mapping.items():
priority = Priority(env, oldprio, db)
priority.name = newprio
priority.update(db)
for severity in list(Severity.select(env, db)):
severity.delete(db)
db.commit()
if __name__ == '__main__':
main()
|
8213a758782a7ab6cecc5a986e193f204fe57691
|
scrapy_gridfsfilespipeline/images.py
|
scrapy_gridfsfilespipeline/images.py
|
from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
|
from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
@classmethod
def from_settings(cls, settings):
store_uri = settings['MONGO_URI']
return cls(store_uri, settings=settings)
|
Add GridFSImagesPipeline.from_settings to use MONGO_URI
|
Add GridFSImagesPipeline.from_settings to use MONGO_URI
|
Python
|
bsd-2-clause
|
zahariesergiu/scrapy-gridfsfilespipeline
|
from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
Add GridFSImagesPipeline.from_settings to use MONGO_URI
|
from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
@classmethod
def from_settings(cls, settings):
store_uri = settings['MONGO_URI']
return cls(store_uri, settings=settings)
|
<commit_before>from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
<commit_msg>Add GridFSImagesPipeline.from_settings to use MONGO_URI<commit_after>
|
from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
@classmethod
def from_settings(cls, settings):
store_uri = settings['MONGO_URI']
return cls(store_uri, settings=settings)
|
from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
Add GridFSImagesPipeline.from_settings to use MONGO_URIfrom scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
@classmethod
def from_settings(cls, settings):
store_uri = settings['MONGO_URI']
return cls(store_uri, settings=settings)
|
<commit_before>from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
<commit_msg>Add GridFSImagesPipeline.from_settings to use MONGO_URI<commit_after>from scrapy.pipelines.images import ImagesPipeline
from .files import GridFSFilesPipeline
class GridFSImagesPipeline(ImagesPipeline, GridFSFilesPipeline):
"""
An extension of ImagesPipeline that store files in MongoDB GridFS.
Is using a guid to check if the file exists in GridFS and MongoDB ObjectId to reference the file with item.
ImagesPipeline was using a single variable 'path' for reference and identification.
guid is used in MongoGridFSFilesPipeline because the pipeline needs a unique identifier generated based on file URL.
MongoGridFSFilesPipeline is using ObjectId to reference the file because it's the primary key.
"""
@classmethod
def from_settings(cls, settings):
store_uri = settings['MONGO_URI']
return cls(store_uri, settings=settings)
|
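The from_settings hook added in the record above pulls the GridFS connection string from MONGO_URI, so the pipeline is driven entirely by project settings. Below is a minimal configuration sketch, assuming a standard Scrapy project; the database name and the pipeline priority are illustrative values, not documented ones, while the dotted path follows the module path shown in the record.

# settings.py -- illustrative only
ITEM_PIPELINES = {
    "scrapy_gridfsfilespipeline.images.GridFSImagesPipeline": 300,
}

MONGO_URI = "mongodb://localhost:27017/scrapy_media"  # read by from_settings()

# Standard ImagesPipeline item fields still apply.
IMAGES_URLS_FIELD = "image_urls"
IMAGES_RESULT_FIELD = "images"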
b65293d2bc21a0385a6170e4fbd9ee7c4ce1c631
|
mopidy/frontends/mpd/protocol/audio_output.py
|
mopidy/frontends/mpd/protocol/audio_output.py
|
from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
|
from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
# TODO Return proper error on unknown outputid
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
# TODO Return proper error on unknown outputid
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
|
Add TODO for handling unknown outputid
|
mpd: Add TODO for handling unknown outputid
|
Python
|
apache-2.0
|
quartz55/mopidy,diandiankan/mopidy,jcass77/mopidy,swak/mopidy,hkariti/mopidy,dbrgn/mopidy,vrs01/mopidy,rawdlite/mopidy,tkem/mopidy,hkariti/mopidy,bencevans/mopidy,jmarsik/mopidy,glogiotatidis/mopidy,swak/mopidy,pacificIT/mopidy,pacificIT/mopidy,liamw9534/mopidy,mokieyue/mopidy,bacontext/mopidy,SuperStarPL/mopidy,quartz55/mopidy,vrs01/mopidy,ZenithDK/mopidy,swak/mopidy,ZenithDK/mopidy,jmarsik/mopidy,bacontext/mopidy,kingosticks/mopidy,ali/mopidy,rawdlite/mopidy,quartz55/mopidy,hkariti/mopidy,adamcik/mopidy,priestd09/mopidy,glogiotatidis/mopidy,abarisain/mopidy,ali/mopidy,pacificIT/mopidy,kingosticks/mopidy,diandiankan/mopidy,woutervanwijk/mopidy,bacontext/mopidy,glogiotatidis/mopidy,swak/mopidy,ali/mopidy,dbrgn/mopidy,adamcik/mopidy,abarisain/mopidy,mokieyue/mopidy,ZenithDK/mopidy,jcass77/mopidy,jodal/mopidy,rawdlite/mopidy,bacontext/mopidy,adamcik/mopidy,jodal/mopidy,bencevans/mopidy,mokieyue/mopidy,diandiankan/mopidy,vrs01/mopidy,tkem/mopidy,jodal/mopidy,glogiotatidis/mopidy,mokieyue/mopidy,priestd09/mopidy,bencevans/mopidy,mopidy/mopidy,ZenithDK/mopidy,SuperStarPL/mopidy,ali/mopidy,dbrgn/mopidy,jcass77/mopidy,jmarsik/mopidy,dbrgn/mopidy,SuperStarPL/mopidy,hkariti/mopidy,liamw9534/mopidy,pacificIT/mopidy,diandiankan/mopidy,priestd09/mopidy,mopidy/mopidy,bencevans/mopidy,jmarsik/mopidy,tkem/mopidy,kingosticks/mopidy,SuperStarPL/mopidy,rawdlite/mopidy,vrs01/mopidy,quartz55/mopidy,tkem/mopidy,mopidy/mopidy,woutervanwijk/mopidy
|
from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
mpd: Add TODO for handling unknown outputid
|
from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
# TODO Return proper error on unknown outputid
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
# TODO Return proper error on unknown outputid
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
|
<commit_before>from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
<commit_msg>mpd: Add TODO for handling unknown outputid<commit_after>
|
from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
# TODO Return proper error on unknown outputid
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
# TODO Return proper error on unknown outputid
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
|
from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
mpd: Add TODO for handling unknown outputidfrom __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
# TODO Return proper error on unknown outputid
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
# TODO Return proper error on unknown outputid
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
|
<commit_before>from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
<commit_msg>mpd: Add TODO for handling unknown outputid<commit_after>from __future__ import unicode_literals
from mopidy.frontends.mpd.protocol import handle_request
@handle_request(r'^disableoutput "(?P<outputid>\d+)"$')
def disableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``disableoutput``
Turns an output off.
"""
if int(outputid) == 0:
context.core.playback.set_mute(True)
# TODO Return proper error on unknown outputid
@handle_request(r'^enableoutput "(?P<outputid>\d+)"$')
def enableoutput(context, outputid):
"""
*musicpd.org, audio output section:*
``enableoutput``
Turns an output on.
"""
if int(outputid) == 0:
context.core.playback.set_mute(False)
# TODO Return proper error on unknown outputid
@handle_request(r'^outputs$')
def outputs(context):
"""
*musicpd.org, audio output section:*
``outputs``
Shows information about all outputs.
"""
enabled = 0 if context.core.playback.get_mute().get() else 1
return [
('outputid', 0),
('outputname', 'Default'),
('outputenabled', enabled),
]
|
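The handlers in the record above map MPD's single "Default" output onto the core mute flag, so disableoutput "0" effectively mutes playback. The raw-protocol sketch below is one way to exercise those handlers against an MPD-compatible frontend; the host, port, and the example response line are assumptions about a typical setup, not values taken from the record.

import socket

def mpd_command(command, host="localhost", port=6600):
    """Send one MPD protocol command and return the response lines before OK/ACK."""
    with socket.create_connection((host, port)) as sock:
        stream = sock.makefile("rwb")
        stream.readline()                      # greeting, e.g. b"OK MPD 0.17.0\n"
        stream.write(command.encode("utf-8") + b"\n")
        stream.flush()
        lines = []
        while True:
            line = stream.readline().decode("utf-8").rstrip("\n")
            if line == "OK" or line.startswith("ACK"):
                break
            lines.append(line)
        return lines

print(mpd_command("outputs"))            # e.g. ['outputid: 0', 'outputname: Default', 'outputenabled: 1']
print(mpd_command('disableoutput "0"'))  # quoted id matches the handler's regex; mutes playback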
87440383fc0c7260b07d625be54b8db8df4154fa
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
# Dirty requirements loads
requirements = []
with open('pip-requires') as f:
requirements = f.read().splitlines()
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
|
from setuptools import setup, find_packages
requirements = ['virtualenv', 'pyparsing==1.5.7', 'pydot==1.0.2']
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
|
Remove loading of requirements from pip-requires
|
Remove loading of requirements from pip-requires
|
Python
|
apache-2.0
|
pombredanne/PackMap,jmvrbanac/PackMap
|
from setuptools import setup, find_packages
# Dirty requirements loads
requirements = []
with open('pip-requires') as f:
requirements = f.read().splitlines()
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
Remove loading of requirements from pip-requires
|
from setuptools import setup, find_packages
requirements = ['virtualenv', 'pyparsing==1.5.7', 'pydot==1.0.2']
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
|
<commit_before>from setuptools import setup, find_packages
# Dirty requirements loads
requirements = []
with open('pip-requires') as f:
requirements = f.read().splitlines()
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
<commit_msg>Remove loading of requirements from pip-requires<commit_after>
|
from setuptools import setup, find_packages
requirements = ['virtualenv', 'pyparsing==1.5.7', 'pydot==1.0.2']
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
|
from setuptools import setup, find_packages
# Dirty requirements loads
requirements = []
with open('pip-requires') as f:
requirements = f.read().splitlines()
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
Remove loading of requirements from pip-requiresfrom setuptools import setup, find_packages
requirements = ['virtualenv', 'pyparsing==1.5.7', 'pydot==1.0.2']
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
|
<commit_before>from setuptools import setup, find_packages
# Dirty requirements loads
requirements = []
with open('pip-requires') as f:
requirements = f.read().splitlines()
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
<commit_msg>Remove loading of requirements from pip-requires<commit_after>from setuptools import setup, find_packages
requirements = ['virtualenv', 'pyparsing==1.5.7', 'pydot==1.0.2']
desc = ''
with open('README.rst') as f:
desc = f.read()
setup(
name='packmap',
version='0.0.1',
description=('PackMap discovers all dependencies for a specific'
'Python package'),
long_description=(desc),
url='https://github.com/jmvrbanac/PackMap',
author='John Vrbanac',
author_email='john.vrbanac@linux.com',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Topic :: Utilities'
],
keywords='discover package dependencies graph dependency requirement',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=requirements,
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'packmap = packmap.cli:main'
],
},
)
|
26856ad4a5041fe0efa8711dc8ee62b31055282f
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description="",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
|
from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
with open(os.path.join(HERE, 'README.md')) as f:
long_description = f.read()
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
|
Add long description to PYPI project page
|
Add long description to PYPI project page
|
Python
|
mit
|
jsvine/markovify
|
from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description="",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
Add long description to PYPI project page
|
from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
with open(os.path.join(HERE, 'README.md')) as f:
long_description = f.read()
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
|
<commit_before>from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description="",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
<commit_msg>Add long description to PYPI project page<commit_after>
|
from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
with open(os.path.join(HERE, 'README.md')) as f:
long_description = f.read()
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
|
from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description="",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
Add long description to PYPI project pagefrom setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
with open(os.path.join(HERE, 'README.md')) as f:
long_description = f.read()
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
|
<commit_before>from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description="",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
<commit_msg>Add long description to PYPI project page<commit_after>from setuptools import setup, find_packages
import sys, os
NAME = "markovify"
HERE = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
with open(os.path.join(HERE, NAME, '__version__.py')) as f:
exec(f.read(), {}, version_ns)
with open(os.path.join(HERE, 'README.md')) as f:
long_description = f.read()
setup(
name="markovify",
version=version_ns['__version__'],
description="A simple, extensible Markov chain generator. Uses include generating random semi-plausible sentences based on an existing text.",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="markov chain text",
author="Jeremy Singer-Vine",
author_email="jsvine@gmail.com",
url="http://github.com/jsvine/markovify",
license="MIT",
packages=find_packages(exclude=["test",]),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
"unidecode",
],
tests_require=[],
test_suite="test"
)
|
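A minimal sketch of the long-description pattern used in the record above, with an added fallback for a missing README.md; the fallback string and file layout are assumptions for illustration, not taken from the markovify repository.

# Illustrative sketch only: read README.md into long_description when present,
# but tolerate its absence so the setup script still runs from a bare checkout.
import os

HERE = os.path.abspath(os.path.dirname(__file__))

try:
    with open(os.path.join(HERE, "README.md"), encoding="utf-8") as f:
        long_description = f.read()
except OSError:  # README.md absent, e.g. in a stripped-down sdist (assumed fallback)
    long_description = "A simple, extensible Markov chain generator."

# long_description_content_type="text/markdown" tells PyPI how to render it;
# without it the Markdown source is shown as plain text on the project page.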
b32bcca1baf8854e62dfeae1f3dabb1d1ae98a5d
|
setup.py
|
setup.py
|
from setuptools import find_packages, setup
setup(
name='tchannel',
version='0.20.2.dev0',
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
|
from setuptools import find_packages, setup
import re
version = None
with open('tchannel/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
if not version:
raise Exception(
'Could not determine version number from tchannel/__init__.py'
)
setup(
name='tchannel',
version=version,
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
|
Remove requirement to have version number in two places
|
Remove requirement to have version number in two places
|
Python
|
mit
|
uber/tchannel-python,uber/tchannel-python
|
from setuptools import find_packages, setup
setup(
name='tchannel',
version='0.20.2.dev0',
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
Remove requirement to have version number in two places
|
from setuptools import find_packages, setup
import re
version = None
with open('tchannel/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
if not version:
raise Exception(
'Could not determine version number from tchannel/__init__.py'
)
setup(
name='tchannel',
version=version,
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
|
<commit_before>from setuptools import find_packages, setup
setup(
name='tchannel',
version='0.20.2.dev0',
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
<commit_msg>Remove requirement to have version number in two places<commit_after>
|
from setuptools import find_packages, setup
import re
version = None
with open('tchannel/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
if not version:
raise Exception(
'Could not determine version number from tchannel/__init__.py'
)
setup(
name='tchannel',
version=version,
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
|
from setuptools import find_packages, setup
setup(
name='tchannel',
version='0.20.2.dev0',
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
Remove requirement to have version number in two places
from setuptools import find_packages, setup
import re
version = None
with open('tchannel/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
if not version:
raise Exception(
'Could not determine version number from tchannel/__init__.py'
)
setup(
name='tchannel',
version=version,
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
|
<commit_before>from setuptools import find_packages, setup
setup(
name='tchannel',
version='0.20.2.dev0',
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
<commit_msg>Remove requirement to have version number in two places<commit_after>from setuptools import find_packages, setup
import re
version = None
with open('tchannel/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
if not version:
raise Exception(
'Could not determine version number from tchannel/__init__.py'
)
setup(
name='tchannel',
version=version,
author=', '.join([
'Abhinav Gupta',
'Aiden Scandella',
'Bryce Lampe',
'Grayson Koonce',
'Junchao Wu',
]),
author_email='abg@uber.com',
description='Network multiplexing and framing protocol for RPC',
license='MIT',
url='https://github.com/uber/tchannel-python',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={
'': ['*.thrift'],
},
install_requires=[
# stdlib backports, no constraints needed
'contextlib2',
# external deps
'crcmod>=1,<2',
'tornado>=4.2,<5',
# tchannel deps
'thriftrw>=0.4,<2',
'threadloop>=1,<2',
],
extras_require={
'vcr': ['PyYAML', 'mock', 'wrapt'],
},
entry_points={
'console_scripts': [
'tcurl.py = tchannel.tcurl:start_ioloop'
]
},
)
|
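A hedged sketch of the same single-source-version technique as the record above, using one multiline regex over the file contents instead of a line-by-line scan; the module path "mypkg/__init__.py" is a placeholder, not tchannel's actual layout.

# Read __version__ from a package's __init__.py without importing the package.
import re

def read_version(init_path):
    """Return the __version__ string defined in init_path, or raise."""
    with open(init_path) as f:
        source = f.read()
    match = re.search(
        r'^__version__\s*=\s*["\']([^"\']+)["\']', source, re.MULTILINE)
    if not match:
        raise RuntimeError("no __version__ found in %s" % init_path)
    return match.group(1)

# e.g. setup(name="mypkg", version=read_version("mypkg/__init__.py"), ...)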
7dcddf9bd6f3901770d949830cd9ab3168873ee1
|
setup.py
|
setup.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0.dev0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
|
Use custom version of core
|
Use custom version of core
|
Python
|
agpl-3.0
|
openfisca/country-template,openfisca/country-template
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
Use custom version of core
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0.dev0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
|
<commit_before>#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
<commit_msg>Use custom version of core<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0.dev0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
Use custom version of core
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0.dev0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
|
<commit_before>#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
<commit_msg>Use custom version of core<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='OpenFisca-Country-Template',
version='0.1.0',
author='OpenFisca Team',
author_email='contact@openfisca.fr',
description=u'Template of a tax and benefit system for OpenFisca',
keywords='benefit microsimulation social tax',
license='http://www.fsf.org/licensing/licenses/agpl-3.0.html',
include_package_data = True, # Will read MANIFEST.in
install_requires=[
'OpenFisca-Core >= 6.1.0.dev0, < 7.0',
],
packages=find_packages(),
test_suite='nose.collector',
)
|
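A hedged illustration of why the record above loosens the lower bound to ">= 6.1.0.dev0": under PEP 440 a development pre-release sorts below its final release, so it cannot satisfy ">= 6.1.0". The snippet assumes the third-party packaging library is available.

from packaging.specifiers import SpecifierSet
from packaging.version import Version

dev_build = Version("6.1.0.dev0")

# A .dev0 pre-release does not satisfy a ">= final release" bound...
print(SpecifierSet(">=6.1.0,<7.0").contains(dev_build, prereleases=True))       # False
# ...but it does satisfy a bound whose floor is itself a pre-release.
print(SpecifierSet(">=6.1.0.dev0,<7.0").contains(dev_build, prereleases=True))  # True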
d4bba92af13f30e8c816591a432857912011b381
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(exclude=['tests', 'tests.*', 'testproject']),
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(
exclude=['tests', 'tests.*', 'testproject', 'testproject.*']),
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
Exclude test project from package - round 2
|
Exclude test project from package - round 2
Also change development status to Alpha
|
Python
|
bsd-2-clause
|
TargetHolding/django-cassandra-engine,TargetHolding/django-cassandra-engine,richardasaurus/django-cassandra-engine,slurms/django-cassandra-engine,maria-grigorieva/django_cassandra_engine,r4fek/django-cassandra-engine,bezineb5/django-cassandra-engine,slurms/django-cassandra-engine,slurms/django-cassandra-engine,TargetHolding/django-cassandra-engine,paksu/django-cassandra-engine,maria-grigorieva/django_cassandra_engine,r4fek/django-cassandra-engine
|
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(exclude=['tests', 'tests.*', 'testproject']),
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
Exclude test project from package - round 2
Also change development status to Alpha
|
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(
exclude=['tests', 'tests.*', 'testproject', 'testproject.*']),
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
<commit_before>from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(exclude=['tests', 'tests.*', 'testproject']),
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
<commit_msg>Exclude test project from package - round 2
Also change development status to Alpha<commit_after>
|
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(
exclude=['tests', 'tests.*', 'testproject', 'testproject.*']),
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(exclude=['tests', 'tests.*', 'testproject']),
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
Exclude test project from package - round 2
Also change development status to Alpha
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(
exclude=['tests', 'tests.*', 'testproject', 'testproject.*']),
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
<commit_before>from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(exclude=['tests', 'tests.*', 'testproject']),
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
<commit_msg>Exclude test project from package - round 2
Also change development status to Alpha<commit_after>from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Cassandra backend for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
pass
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=['cassandra-driver', 'djangotoolbox>=1.6.0', 'cqlengine'],
packages=find_packages(
exclude=['tests', 'tests.*', 'testproject', 'testproject.*']),
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
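A hedged, self-contained demonstration of the find_packages() gotcha behind the "round 2" fix above: excluding only 'testproject' still leaves its sub-packages in the distribution, which is why 'testproject.*' is added. The package names in the demo are invented.

import os
import tempfile

from setuptools import find_packages

def touch_pkg(root, dotted):
    # Create an importable package directory with an empty __init__.py.
    path = os.path.join(root, *dotted.split("."))
    os.makedirs(path, exist_ok=True)
    open(os.path.join(path, "__init__.py"), "w").close()

root = tempfile.mkdtemp()
for name in ("mypkg", "testproject", "testproject.sub"):
    touch_pkg(root, name)

print(sorted(find_packages(root, exclude=["testproject"])))
# -> ['mypkg', 'testproject.sub']   (the sub-package slips through)

print(sorted(find_packages(root, exclude=["testproject", "testproject.*"])))
# -> ['mypkg']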
2d29a3a635fecfdf83f0a043621abbacacaa43a1
|
setup.py
|
setup.py
|
from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
|
from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2",
"future"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
|
Add future as required package (for Python2)
|
Add future as required package (for Python2)
|
Python
|
apache-2.0
|
bstellato/mathprogbasepy
|
from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
Add future as required package (for Python2)
|
from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2",
"future"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
|
<commit_before>from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
<commit_msg>Add future as required package (for Python2)<commit_after>
|
from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2",
"future"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
|
from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
Add future as required package (for Python2)
from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2",
"future"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
|
<commit_before>from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
<commit_msg>Add future as required package (for Python2)<commit_after>from setuptools import setup
setup(name='mathprogbasepy',
version='0.1.1',
author='Bartolomeo Stellato',
description='Low level interface for mathematical programming solvers.',
url='http://github.com/bstellato/mathprogbasepy/',
package_dir={'mathprogbasepy': 'mathprogbasepy'},
install_requires=["numpy >= 1.7",
"scipy >= 0.13.2",
"future"],
license='Apache 2.0',
packages=['mathprogbasepy',
'mathprogbasepy.quadprog',
'mathprogbasepy.quadprog.solvers',
'mathprogbasepy.unittests'])
|
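A hedged aside on an alternative to adding 'future' unconditionally as in the record above: a PEP 508 environment marker restricts the dependency to Python 2 installs. The package name and version here are placeholders, not mathprogbasepy's metadata.

from setuptools import setup

setup(
    name="example-pkg",          # placeholder name for the sketch
    version="0.0.1",
    install_requires=[
        "numpy >= 1.7",
        "scipy >= 0.13.2",
        'future; python_version < "3"',   # installed only under Python 2
    ],
)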
762908c10fc3d9a6c9e30d9328e96c2a8bf3ce46
|
setup.py
|
setup.py
|
"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
|
"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
long_description_content_type='text/markdown',
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
|
Fix description content type for PyPi
|
Fix description content type for PyPi
|
Python
|
mit
|
masterqa/MasterQA,mdmintz/MasterQA
|
"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
Fix description content type for PyPi
|
"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
long_description_content_type='text/markdown',
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
|
<commit_before>"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
<commit_msg>Fix description content type for PyPi<commit_after>
|
"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
long_description_content_type='text/markdown',
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
|
"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
Fix description content type for PyPi
"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
long_description_content_type='text/markdown',
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
|
<commit_before>"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
<commit_msg>Fix description content type for PyPi<commit_after>"""
The setup package to install MasterQA requirements
"""
from setuptools import setup, find_packages # noqa
from os import path
this_directory = path.abspath(path.dirname(__file__))
long_description = None
try:
with open(path.join(this_directory, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
except IOError:
long_description = (
'Automation-Assisted Manual Testing - http://masterqa.com')
setup(
name='masterqa',
version='1.1.5',
description='Automation-Assisted Manual Testing - http://masterqa.com',
long_description=long_description,
long_description_content_type='text/markdown',
platforms='Mac * Windows * Linux',
url='http://masterqa.com',
author='Michael Mintz',
author_email='mdmintz@gmail.com',
maintainer='Michael Mintz',
license='The MIT License',
install_requires=[
'seleniumbase',
],
packages=['masterqa'],
entry_points={
'nose.plugins': []
}
)
|
53646338ce110b034b28bb13bb0400ab6f86b3eb
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
|
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
classifiers=[
"Programming Language :: Python :: 3"
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
|
Add Python 3 support via classifier
|
Add Python 3 support via classifier
|
Python
|
apache-2.0
|
jessemyers/openapi
|
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
Add Python 3 support via classifier
|
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
classifiers=[
"Programming Language :: Python :: 3"
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
<commit_msg>Add Python 3 support via classifier<commit_after>
|
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
classifiers=[
"Programming Language :: Python :: 3"
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
|
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
Add Python 3 support via classifier
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
classifiers=[
"Programming Language :: Python :: 3"
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
<commit_msg>Add Python 3 support via classifier<commit_after>#!/usr/bin/env python
from setuptools import find_packages, setup
project = "openapi"
version = "1.0.0"
setup(
name=project,
version=version,
description="Python OpenAPI 2.0 (Swagger) object model",
author="Globality Engineering",
author_email="engineering@globality.com",
url="https://github.com/globality-corp/openapi",
classifiers=[
"Programming Language :: Python :: 3"
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
keywords="openapi swagger",
install_requires=[
"inflection>=0.3.1",
"jsonschema>=2.6.0",
],
extras_require={
"yaml": [
"PyYAML>=3.12",
],
},
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
|
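A hedged note on the classifier added in the record above: trove classifiers are informational metadata that pip does not enforce, so projects that also want installation-time enforcement typically pair them with python_requires. The metadata below is illustrative, not the openapi project's.

from setuptools import setup

setup(
    name="example-openapi-model",     # placeholder
    version="0.0.1",
    classifiers=[
        "Programming Language :: Python :: 3",   # shown on PyPI, used for filtering/search
    ],
    python_requires=">=3.4",          # actually rejected by pip on older interpreters
)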
ffaac071ade9e1d05b12dec0d57b23b38c4975d7
|
setup.py
|
setup.py
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy',
author_email='audreyr@gmail.com',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
)
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy Greenfeld',
author_email='aroy@alum.mit.edu',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
)
|
Update trove classifier to Stable. Update my info.
|
Update trove classifier to Stable. Update my info.
|
Python
|
bsd-3-clause
|
audreyr/binaryornot,audreyr/binaryornot,audreyr/binaryornot
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy',
author_email='audreyr@gmail.com',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
)
Update trove classifier to Stable. Update my info.
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy Greenfeld',
author_email='aroy@alum.mit.edu',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
)
|
<commit_before>#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy',
author_email='audreyr@gmail.com',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
)
<commit_msg>Update trove classifier to Stable. Update my info.<commit_after>
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy Greenfeld',
author_email='aroy@alum.mit.edu',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
)
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy',
author_email='audreyr@gmail.com',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
)
Update trove classifier to Stable. Update my info.#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy Greenfeld',
author_email='aroy@alum.mit.edu',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
)
|
<commit_before>#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy',
author_email='audreyr@gmail.com',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
)
<commit_msg>Update trove classifier to Stable. Update my info.<commit_after>#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='binaryornot',
version='0.4.0',
description=(
'Ultra-lightweight pure Python package to check '
'if a file is binary or text.'
),
long_description=readme + '\n\n' + history,
author='Audrey Roy Greenfeld',
author_email='aroy@alum.mit.edu',
url='https://github.com/audreyr/binaryornot',
packages=[
'binaryornot',
],
package_dir={'binaryornot': 'binaryornot'},
include_package_data=True,
install_requires=[
'chardet>=2.0.0',
],
license="BSD",
zip_safe=False,
keywords='binaryornot',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
)
|
3cad766f5c42f6dc169eb406e6d3d7ff7a530d42
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.23',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.24',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
Update the PyPI version to 0.2.24.
|
Update the PyPI version to 0.2.24.
|
Python
|
mit
|
Doist/todoist-python
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.23',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
Update the PyPI version to 0.2.24.
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.24',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
<commit_before># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.23',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
<commit_msg>Update the PyPI version to 0.2.24.<commit_after>
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.24',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.23',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
Update the PyPI version to 0.2.24.# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.24',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
<commit_before># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.23',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
<commit_msg>Update the PyPI version to 0.2.24.<commit_after># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.24',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
fbb9c2bc6f29b059da09764b563441ae687aee47
|
contentcuration/contentcuration/utils/asynccommand.py
|
contentcuration/contentcuration/utils/asynccommand.py
|
from abc import abstractmethod
from django.core.management.base import BaseCommand
class TaskCommand(BaseCommand):
def start_progress(self, *args, **options):
# TODO: needs implementation
pass
def update_progress(self, *args, **options):
# TODO: needs implementation
pass
@abstractmethod
def handle_async(self, *args, **options):
pass
|
from abc import abstractmethod
from collections import namedtuple
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
Progress = namedtuple(
'Progress',
[
'progress',
'total',
'fraction',
]
)
class TaskCommand(BaseCommand):
def handle(self, *args, **options):
self.progresstracker = None
return self.handle_async(*args, **options)
def start_progress(self, total):
self.progresstracker = Progress(progress=0, total=total, fraction=0)
def update_progress(self, increment):
tracker = self.progresstracker
progress = tracker.progress + increment
if progress > tracker.total:
raise CommandError("Progress reaches over 100%.")
fraction = 1.0 * progress / tracker.total
updated_tracker = tracker._replace(progress=progress, fraction=fraction)
self.progresstracker = updated_tracker
@abstractmethod
def handle_async(self, *args, **options):
pass
|
Add a progress tracker to the async task command
|
Add a progress tracker to the async task command
|
Python
|
mit
|
DXCanas/content-curation,fle-internal/content-curation,DXCanas/content-curation,fle-internal/content-curation,DXCanas/content-curation,fle-internal/content-curation,fle-internal/content-curation,DXCanas/content-curation
|
from abc import abstractmethod
from django.core.management.base import BaseCommand
class TaskCommand(BaseCommand):
def start_progress(self, *args, **options):
# TODO: needs implementation
pass
def update_progress(self, *args, **options):
# TODO: needs implementation
pass
@abstractmethod
def handle_async(self, *args, **options):
pass
Add a progress tracker to the async task command
|
from abc import abstractmethod
from collections import namedtuple
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
Progress = namedtuple(
'Progress',
[
'progress',
'total',
'fraction',
]
)
class TaskCommand(BaseCommand):
def handle(self, *args, **options):
self.progresstracker = None
return self.handle_async(*args, **options)
def start_progress(self, total):
self.progresstracker = Progress(progress=0, total=total, fraction=0)
def update_progress(self, increment):
tracker = self.progresstracker
progress = tracker.progress + increment
if progress > tracker.total:
raise CommandError("Progress reaches over 100%.")
fraction = 1.0 * progress / tracker.total
updated_tracker = tracker._replace(progress=progress, fraction=fraction)
self.progresstracker = updated_tracker
@abstractmethod
def handle_async(self, *args, **options):
pass
|
<commit_before>from abc import abstractmethod
from django.core.management.base import BaseCommand
class TaskCommand(BaseCommand):
def start_progress(self, *args, **options):
# TODO: needs implementation
pass
def update_progress(self, *args, **options):
# TODO: needs implementation
pass
@abstractmethod
def handle_async(self, *args, **options):
pass
<commit_msg>Add a progress tracker to the async task command<commit_after>
|
from abc import abstractmethod
from collections import namedtuple
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
Progress = namedtuple(
'Progress',
[
'progress',
'total',
'fraction',
]
)
class TaskCommand(BaseCommand):
def handle(self, *args, **options):
self.progresstracker = None
return self.handle_async(*args, **options)
def start_progress(self, total):
self.progresstracker = Progress(progress=0, total=total, fraction=0)
def update_progress(self, increment):
tracker = self.progresstracker
progress = tracker.progress + increment
if progress > tracker.total:
raise CommandError("Progress reaches over 100%.")
fraction = 1.0 * progress / tracker.total
updated_tracker = tracker._replace(progress=progress, fraction=fraction)
self.progresstracker = updated_tracker
@abstractmethod
def handle_async(self, *args, **options):
pass
|
from abc import abstractmethod
from django.core.management.base import BaseCommand
class TaskCommand(BaseCommand):
def start_progress(self, *args, **options):
# TODO: needs implementation
pass
def update_progress(self, *args, **options):
# TODO: needs implementation
pass
@abstractmethod
def handle_async(self, *args, **options):
pass
Add a progress tracker to the async task commandfrom abc import abstractmethod
from collections import namedtuple
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
Progress = namedtuple(
'Progress',
[
'progress',
'total',
'fraction',
]
)
class TaskCommand(BaseCommand):
def handle(self, *args, **options):
self.progresstracker = None
return self.handle_async(*args, **options)
def start_progress(self, total):
self.progresstracker = Progress(progress=0, total=total, fraction=0)
def update_progress(self, increment):
tracker = self.progresstracker
progress = tracker.progress + increment
if progress > tracker.total:
raise CommandError("Progress reaches over 100%.")
fraction = 1.0 * progress / tracker.total
updated_tracker = tracker._replace(progress=progress, fraction=fraction)
self.progresstracker = updated_tracker
@abstractmethod
def handle_async(self, *args, **options):
pass
|
<commit_before>from abc import abstractmethod
from django.core.management.base import BaseCommand
class TaskCommand(BaseCommand):
def start_progress(self, *args, **options):
# TODO: needs implementation
pass
def update_progress(self, *args, **options):
# TODO: needs implementation
pass
@abstractmethod
def handle_async(self, *args, **options):
pass
<commit_msg>Add a progress tracker to the async task command<commit_after>from abc import abstractmethod
from collections import namedtuple
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
Progress = namedtuple(
'Progress',
[
'progress',
'total',
'fraction',
]
)
class TaskCommand(BaseCommand):
def handle(self, *args, **options):
self.progresstracker = None
return self.handle_async(*args, **options)
def start_progress(self, total):
self.progresstracker = Progress(progress=0, total=total, fraction=0)
def update_progress(self, increment):
tracker = self.progresstracker
progress = tracker.progress + increment
if progress > tracker.total:
raise CommandError("Progress reaches over 100%.")
fraction = 1.0 * progress / tracker.total
updated_tracker = tracker._replace(progress=progress, fraction=fraction)
self.progresstracker = updated_tracker
@abstractmethod
def handle_async(self, *args, **options):
pass
|
10cde8f4df66775479e509a994afd0c3f1e9041d
|
armstrong/apps/embedded_video/tests/_utils.py
|
armstrong/apps/embedded_video/tests/_utils.py
|
from django.test import TestCase as DjangoTestCase
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
|
from django.core.management.color import no_style
from django.db import connection
from django.test import TestCase as DjangoTestCase
def create_concrete_table(func):
style = no_style()
seen_models = connection.introspection.installed_models(
connection.introspection.table_names())
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql, _references = connection.creation.sql_create_model(self.model,
style, seen_models)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
def destroy_concrete_table(func):
style = no_style()
# Assume that there are no references to destroy, these are supposed to be
# simple models
references = {}
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql = connection.creation.sql_destroy_model(self.model, references,
style)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
|
Add two new helpers for creating concrete model tables on the fly
|
Add two new helpers for creating concrete model tables on the fly
These are extremely simple, but they provide the basis for dynamically
creating a model table and deleting it for each test case. Going this
route instead of setting it up per TestCase to allow running in
parallel---each test method is capable of being running by itself.
|
Python
|
apache-2.0
|
armstrong/armstrong.apps.embedded_video
|
from django.test import TestCase as DjangoTestCase
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
Add two new helpers for creating concrete model tables on the fly
These are extremely simple, but they provide the basis for dynamically
creating a model table and deleting it for each test case. Going this
route instead of setting it up per TestCase to allow running in
parallel---each test method is capable of being running by itself.
|
from django.core.management.color import no_style
from django.db import connection
from django.test import TestCase as DjangoTestCase
def create_concrete_table(func):
style = no_style()
seen_models = connection.introspection.installed_models(
connection.introspection.table_names())
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql, _references = connection.creation.sql_create_model(self.model,
style, seen_models)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
def destroy_concrete_table(func):
style = no_style()
# Assume that there are no references to destroy, these are supposed to be
# simple models
references = {}
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql = connection.creation.sql_destroy_model(self.model, references,
style)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
|
<commit_before>from django.test import TestCase as DjangoTestCase
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
<commit_msg>Add two new helpers for creating concrete model tables on the fly
These are extremely simple, but they provide the basis for dynamically
creating a model table and deleting it for each test case. Going this
route instead of setting it up per TestCase to allow running in
parallel---each test method is capable of being running by itself.<commit_after>
|
from django.core.management.color import no_style
from django.db import connection
from django.test import TestCase as DjangoTestCase
def create_concrete_table(func):
style = no_style()
seen_models = connection.introspection.installed_models(
connection.introspection.table_names())
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql, _references = connection.creation.sql_create_model(self.model,
style, seen_models)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
def destroy_concrete_table(func):
style = no_style()
# Assume that there are no references to destroy, these are supposed to be
# simple models
references = {}
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql = connection.creation.sql_destroy_model(self.model, references,
style)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
|
from django.test import TestCase as DjangoTestCase
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
Add two new helpers for creating concrete model tables on the fly
These are extremely simple, but they provide the basis for dynamically
creating a model table and deleting it for each test case. Going this
route instead of setting it up per TestCase to allow running in
parallel---each test method is capable of being running by itself.from django.core.management.color import no_style
from django.db import connection
from django.test import TestCase as DjangoTestCase
def create_concrete_table(func):
style = no_style()
seen_models = connection.introspection.installed_models(
connection.introspection.table_names())
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql, _references = connection.creation.sql_create_model(self.model,
style, seen_models)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
def destroy_concrete_table(func):
style = no_style()
# Assume that there are no references to destroy, these are supposed to be
# simple models
references = {}
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql = connection.creation.sql_destroy_model(self.model, references,
style)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
|
<commit_before>from django.test import TestCase as DjangoTestCase
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
<commit_msg>Add two new helpers for creating concrete model tables on the fly
These are extremely simple, but they provide the basis for dynamically
creating a model table and deleting it for each test case. Going this
route instead of setting it up per TestCase to allow running in
parallel---each test method is capable of being running by itself.<commit_after>from django.core.management.color import no_style
from django.db import connection
from django.test import TestCase as DjangoTestCase
def create_concrete_table(func):
style = no_style()
seen_models = connection.introspection.installed_models(
connection.introspection.table_names())
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql, _references = connection.creation.sql_create_model(self.model,
style, seen_models)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
def destroy_concrete_table(func):
style = no_style()
# Assume that there are no references to destroy, these are supposed to be
# simple models
references = {}
def inner(self, *args, **kwargs):
func(self, *args, **kwargs)
sql = connection.creation.sql_destroy_model(self.model, references,
style)
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
return inner
# TODO: pull into a common dev package so all armstrong code can use it
def concrete(klass):
attrs = {'__module__': concrete.__module__, }
return type("Concrete%s" % klass.__name__, (klass, ), attrs)
class TestCase(DjangoTestCase):
def assertModelHasField(self, model, field_name, field_class=None):
self.assertTrue(hasattr(model, field_name))
field = model._meta.get_field_by_name(field_name)[0]
if field_class is not None:
self.assertTrue(isinstance(field, field_class))
|
bc48dd6e6b28406874cb3c3fda6c91cc90c77bb7
|
examples/users.py
|
examples/users.py
|
from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
bob = olite.users.create(name='bob', key='my-awesome-key')
alice = olite.users.create(name='alice', key_path='~/.ssh/id_rsa.pub')
|
from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
# create user object
vlad = olite.users.create(name='vlad', key_path='/home/wok/.ssh/id_rsa.pub')
# get user by name
vlad = olite.users.get(name='vlad')
# get_or_create django style
vlad = olite.users.get_or_create(name='vlad')
# check if user is admin or not
print vlad.is_admin
|
Introduce a complete user operations example
|
Introduce a complete user operations example
|
Python
|
bsd-2-clause
|
shawkinsl/pyolite,PressLabs/pyolite
|
from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
bob = olite.users.create(name='bob', key='my-awesome-key')
alice = olite.users.create(name='alice', key_path='~/.ssh/id_rsa.pub')
Introduce a complete user operations example
|
from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
# create user object
vlad = olite.users.create(name='vlad', key_path='/home/wok/.ssh/id_rsa.pub')
# get user by name
vlad = olite.users.get(name='vlad')
# get_or_create django style
vlad = olite.users.get_or_create(name='vlad')
# check if user is admin or not
print vlad.is_admin
|
<commit_before>from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
bob = olite.users.create(name='bob', key='my-awesome-key')
alice = olite.users.create(name='alice', key_path='~/.ssh/id_rsa.pub')
<commit_msg>Introduce a complete user operations example<commit_after>
|
from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
# create user object
vlad = olite.users.create(name='vlad', key_path='/home/wok/.ssh/id_rsa.pub')
# get user by name
vlad = olite.users.get(name='vlad')
# get_or_create django style
vlad = olite.users.get_or_create(name='vlad')
# check if user is admin or not
print vlad.is_admin
|
from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
bob = olite.users.create(name='bob', key='my-awesome-key')
alice = olite.users.create(name='alice', key_path='~/.ssh/id_rsa.pub')
Introduce a complete user operations examplefrom pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
# create user object
vlad = olite.users.create(name='vlad', key_path='/home/wok/.ssh/id_rsa.pub')
# get user by name
vlad = olite.users.get(name='vlad')
# get_or_create django style
vlad = olite.users.get_or_create(name='vlad')
# check if user is admin or not
print vlad.is_admin
|
<commit_before>from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
bob = olite.users.create(name='bob', key='my-awesome-key')
alice = olite.users.create(name='alice', key_path='~/.ssh/id_rsa.pub')
<commit_msg>Introduce a complete user operations example<commit_after>from pyolite import Pyolite
# initial olite object
admin_repository = 'gitolite-admin/'
olite = Pyolite(admin_repository=admin_repository)
# create user object
vlad = olite.users.create(name='vlad', key_path='/home/wok/.ssh/id_rsa.pub')
# get user by name
vlad = olite.users.get(name='vlad')
# get_or_create django style
vlad = olite.users.get_or_create(name='vlad')
# check if user is admin or not
print vlad.is_admin
|
10561287678c1621927757de0328f7275fc764d8
|
api/nodes/urls.py
|
api/nodes/urls.py
|
from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
|
from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/confirm/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
|
Change confirm delete url. /v2/nodes/{pk}/pointers was being read as /v2/nodes/{pk}/{token} and returning 'Incorrect token' instead of pointers list.
|
Change confirm delete url. /v2/nodes/{pk}/pointers was being read as /v2/nodes/{pk}/{token} and returning 'Incorrect token' instead of pointers list.
|
Python
|
apache-2.0
|
binoculars/osf.io,jnayak1/osf.io,Johnetordoff/osf.io,GageGaskins/osf.io,MerlinZhang/osf.io,mluo613/osf.io,icereval/osf.io,hmoco/osf.io,saradbowman/osf.io,icereval/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,chrisseto/osf.io,HalcyonChimera/osf.io,SSJohns/osf.io,dplorimer/osf,wearpants/osf.io,fabianvf/osf.io,HarryRybacki/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,aaxelb/osf.io,kwierman/osf.io,leb2dg/osf.io,mattclark/osf.io,doublebits/osf.io,njantrania/osf.io,jmcarp/osf.io,cwisecarver/osf.io,emetsger/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,caseyrollins/osf.io,acshi/osf.io,doublebits/osf.io,caseyrollins/osf.io,kch8qx/osf.io,ticklemepierce/osf.io,MerlinZhang/osf.io,Nesiehr/osf.io,mfraezz/osf.io,sbt9uc/osf.io,erinspace/osf.io,alexschiller/osf.io,dplorimer/osf,danielneis/osf.io,KAsante95/osf.io,reinaH/osf.io,jolene-esposito/osf.io,danielneis/osf.io,Nesiehr/osf.io,brianjgeiger/osf.io,billyhunt/osf.io,reinaH/osf.io,TomHeatwole/osf.io,SSJohns/osf.io,ZobairAlijan/osf.io,jmcarp/osf.io,aaxelb/osf.io,Nesiehr/osf.io,KAsante95/osf.io,zachjanicki/osf.io,mattclark/osf.io,caneruguz/osf.io,crcresearch/osf.io,alexschiller/osf.io,laurenrevere/osf.io,TomBaxter/osf.io,jeffreyliu3230/osf.io,kwierman/osf.io,brandonPurvis/osf.io,baylee-d/osf.io,arpitar/osf.io,TomHeatwole/osf.io,lyndsysimon/osf.io,CenterForOpenScience/osf.io,bdyetton/prettychart,njantrania/osf.io,billyhunt/osf.io,rdhyee/osf.io,mluo613/osf.io,cosenal/osf.io,reinaH/osf.io,zachjanicki/osf.io,samanehsan/osf.io,cosenal/osf.io,hmoco/osf.io,dplorimer/osf,caseyrygt/osf.io,wearpants/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,mluke93/osf.io,rdhyee/osf.io,GageGaskins/osf.io,amyshi188/osf.io,icereval/osf.io,jnayak1/osf.io,DanielSBrown/osf.io,chennan47/osf.io,njantrania/osf.io,jinluyuan/osf.io,sbt9uc/osf.io,sloria/osf.io,lyndsysimon/osf.io,arpitar/osf.io,jolene-esposito/osf.io,caneruguz/osf.io,cslzchen/osf.io,kch8qx/osf.io,cwisecarver/osf.io,caseyrygt/osf.io,HalcyonChimera/osf.io,emetsger/osf.io,TomHeatwole/osf.io,acshi/osf.io,GageGaskins/osf.io,mfraezz/osf.io,ticklemepierce/osf.io,felliott/osf.io,chrisseto/osf.io,DanielSBrown/osf.io,ZobairAlijan/osf.io,asanfilippo7/osf.io,alexschiller/osf.io,samanehsan/osf.io,billyhunt/osf.io,RomanZWang/osf.io,cwisecarver/osf.io,KAsante95/osf.io,mluke93/osf.io,DanielSBrown/osf.io,amyshi188/osf.io,doublebits/osf.io,ckc6cz/osf.io,abought/osf.io,amyshi188/osf.io,MerlinZhang/osf.io,jmcarp/osf.io,bdyetton/prettychart,alexschiller/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,sbt9uc/osf.io,adlius/osf.io,HalcyonChimera/osf.io,cldershem/osf.io,billyhunt/osf.io,petermalcolm/osf.io,Johnetordoff/osf.io,caseyrygt/osf.io,wearpants/osf.io,cldershem/osf.io,jnayak1/osf.io,cldershem/osf.io,baylee-d/osf.io,cslzchen/osf.io,samchrisinger/osf.io,abought/osf.io,mluo613/osf.io,jinluyuan/osf.io,Ghalko/osf.io,ckc6cz/osf.io,danielneis/osf.io,erinspace/osf.io,samchrisinger/osf.io,ckc6cz/osf.io,adlius/osf.io,HarryRybacki/osf.io,TomBaxter/osf.io,asanfilippo7/osf.io,sloria/osf.io,HarryRybacki/osf.io,acshi/osf.io,zamattiac/osf.io,asanfilippo7/osf.io,crcresearch/osf.io,sloria/osf.io,hmoco/osf.io,danielneis/osf.io,leb2dg/osf.io,Nesiehr/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,jolene-esposito/osf.io,amyshi188/osf.io,SSJohns/osf.io,RomanZWang/osf.io,kwierman/osf.io,cosenal/osf.io,mattclark/osf.io,jeffreyliu3230/osf.io,lyndsysimon/osf.io,bdyetton/prettychart,petermalcolm/osf.io,mluo613/osf.io,wearpants/osf.io,ZobairAlijan/osf.io,CenterForOpenScience/osf.io,abought/osf.io,fabianvf/osf.io,doublebits/osf.io,laurenrevere/osf.io,baylee-
d/osf.io,lyndsysimon/osf.io,erinspace/osf.io,monikagrabowska/osf.io,zamattiac/osf.io,ZobairAlijan/osf.io,RomanZWang/osf.io,samanehsan/osf.io,brandonPurvis/osf.io,zamattiac/osf.io,kch8qx/osf.io,rdhyee/osf.io,Ghalko/osf.io,GageGaskins/osf.io,brandonPurvis/osf.io,ticklemepierce/osf.io,jolene-esposito/osf.io,jeffreyliu3230/osf.io,arpitar/osf.io,binoculars/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,pattisdr/osf.io,emetsger/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,acshi/osf.io,saradbowman/osf.io,haoyuchen1992/osf.io,chennan47/osf.io,dplorimer/osf,arpitar/osf.io,alexschiller/osf.io,crcresearch/osf.io,zachjanicki/osf.io,samchrisinger/osf.io,jnayak1/osf.io,haoyuchen1992/osf.io,petermalcolm/osf.io,felliott/osf.io,felliott/osf.io,sbt9uc/osf.io,bdyetton/prettychart,binoculars/osf.io,mfraezz/osf.io,chennan47/osf.io,zamattiac/osf.io,adlius/osf.io,billyhunt/osf.io,abought/osf.io,reinaH/osf.io,acshi/osf.io,fabianvf/osf.io,KAsante95/osf.io,brandonPurvis/osf.io,emetsger/osf.io,DanielSBrown/osf.io,fabianvf/osf.io,KAsante95/osf.io,caseyrygt/osf.io,haoyuchen1992/osf.io,samchrisinger/osf.io,cldershem/osf.io,haoyuchen1992/osf.io,TomHeatwole/osf.io,SSJohns/osf.io,doublebits/osf.io,samanehsan/osf.io,ticklemepierce/osf.io,zachjanicki/osf.io,caneruguz/osf.io,cslzchen/osf.io,RomanZWang/osf.io,RomanZWang/osf.io,mfraezz/osf.io,petermalcolm/osf.io,monikagrabowska/osf.io,jeffreyliu3230/osf.io,adlius/osf.io,cslzchen/osf.io,Ghalko/osf.io,pattisdr/osf.io,cosenal/osf.io,ckc6cz/osf.io,jinluyuan/osf.io,Ghalko/osf.io,laurenrevere/osf.io,kwierman/osf.io,chrisseto/osf.io,asanfilippo7/osf.io,kch8qx/osf.io,GageGaskins/osf.io,MerlinZhang/osf.io,mluke93/osf.io,kch8qx/osf.io,monikagrabowska/osf.io,chrisseto/osf.io,leb2dg/osf.io,hmoco/osf.io,mluo613/osf.io,HarryRybacki/osf.io,brandonPurvis/osf.io,jinluyuan/osf.io,njantrania/osf.io,jmcarp/osf.io,mluke93/osf.io,aaxelb/osf.io,leb2dg/osf.io
|
from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
Change confirm delete url. /v2/nodes/{pk}/pointers was being read as /v2/nodes/{pk}/{token} and returning 'Incorrect token' instead of pointers list.
|
from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/confirm/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
|
<commit_before>from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
<commit_msg>Change confirm delete url. /v2/nodes/{pk}/pointers was being read as /v2/nodes/{pk}/{token} and returning 'Incorrect token' instead of pointers list.<commit_after>
|
from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/confirm/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
|
from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
Change confirm delete url. /v2/nodes/{pk}/pointers was being read as /v2/nodes/{pk}/{token} and returning 'Incorrect token' instead of pointers list.from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/confirm/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
|
<commit_before>from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
<commit_msg>Change confirm delete url. /v2/nodes/{pk}/pointers was being read as /v2/nodes/{pk}/{token} and returning 'Incorrect token' instead of pointers list.<commit_after>from django.conf.urls import url
from api.nodes import views
urlpatterns = [
# Examples:
# url(r'^$', 'api.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.NodeList.as_view(), name='node-list'),
url(r'^(?P<pk>\w+)/$', views.NodeDetail.as_view(), name='node-detail'),
url(r'^(?P<pk>\w+)/confirm/(?P<token>\w+)$', views.NodeDeleteConfirm.as_view(), name='node-delete-confirm'),
url(r'^(?P<pk>\w+)/contributors/$', views.NodeContributorsList.as_view(), name='node-contributors'),
url(r'^(?P<pk>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name='node-registrations'),
url(r'^(?P<pk>\w+)/children/$', views.NodeChildrenList.as_view(), name='node-children'),
url(r'^(?P<pk>\w+)/pointers/$', views.NodePointersList.as_view(), name='node-pointers'),
url(r'^(?P<pk>\w+)/files/$', views.NodeFilesList.as_view(), name='node-files'),
url(r'^(?P<pk>\w+)/pointers/(?P<pointer_id>\w+)', views.NodePointerDetail.as_view(), name='node-pointer-detail'),
]
|
fa610209334a53cd29441429609c5b045641b4d7
|
exp/lib/models/content_node.py
|
exp/lib/models/content_node.py
|
from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
|
import urllib
from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_variant_url(self, variant_name):
query = '?variant={0}'.format(variant_name)
return api_utils.generate_url('/api/delivery' + self.document.get('path')) + query
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
|
Add get_variant_url method to content node.
|
Add get_variant_url method to content node.
|
Python
|
mit
|
ScalaInc/exp-python2-sdk,ScalaInc/exp-python2-sdk
|
from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
Add get_variant_url method to content node.
|
import urllib
from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_variant_url(self, variant_name):
query = '?variant={0}'.format(variant_name)
return api_utils.generate_url('/api/delivery' + self.document.get('path')) + query
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
|
<commit_before>from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
<commit_msg>Add get_variant_url method to content node.<commit_after>
|
import urllib
from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_variant_url(self, variant_name):
query = '?variant={0}'.format(variant_name)
return api_utils.generate_url('/api/delivery' + self.document.get('path')) + query
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
|
from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
Add get_variant_url method to content node.import urllib
from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_variant_url(self, variant_name):
query = '?variant={0}'.format(variant_name)
return api_utils.generate_url('/api/delivery' + self.document.get('path')) + query
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
|
<commit_before>from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
<commit_msg>Add get_variant_url method to content node.<commit_after>import urllib
from .. import api_utils
class ContentNode(object):
def __init__(self, document, _isChildrenPopulated=False):
self.document = document
self._isChildrenPopulated = _isChildrenPopulated
def get_url(self):
return api_utils.generate_url("/api/delivery" + self.document.get("path"))
def get_variant_url(self, variant_name):
query = '?variant={0}'.format(variant_name)
return api_utils.generate_url('/api/delivery' + self.document.get('path')) + query
def get_children(self):
if not self._isChildrenPopulated:
self.document = api_utils.get('/api/content/' + self.document.get("uuid") + '/children')
self._isChildrenPopulated = True
return [ContentNode(x) for x in self.document.get("children")]
|