commit stringlengths 40 40 | old_file stringlengths 4 118 | new_file stringlengths 4 118 | old_contents stringlengths 0 2.94k | new_contents stringlengths 1 4.43k | subject stringlengths 15 444 | message stringlengths 16 3.45k | lang stringclasses 1 value | license stringclasses 13 values | repos stringlengths 5 43.2k | prompt stringlengths 17 4.58k | response stringlengths 1 4.43k | prompt_tagged stringlengths 58 4.62k | response_tagged stringlengths 1 4.43k | text stringlengths 132 7.29k | text_tagged stringlengths 173 7.33k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c64060482a34036615805b13416c78bb78a3058a | swh/web/common/urlsindex.py | swh/web/common/urlsindex.py | # Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
| # Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
if view_name:
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
else:
cls.urlpatterns[cls.scope].append(url(url_pattern, view))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
| Allow to register unnamed django views | common: Allow to register unnamed django views
| Python | agpl-3.0 | SoftwareHeritage/swh-web-ui,SoftwareHeritage/swh-web-ui,SoftwareHeritage/swh-web-ui | # Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
common: Allow to register unnamed django views | # Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
if view_name:
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
else:
cls.urlpatterns[cls.scope].append(url(url_pattern, view))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
| <commit_before># Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
<commit_msg>common: Allow to register unnamed django views<commit_after> | # Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
if view_name:
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
else:
cls.urlpatterns[cls.scope].append(url(url_pattern, view))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
| # Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
common: Allow to register unnamed django views# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
if view_name:
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
else:
cls.urlpatterns[cls.scope].append(url(url_pattern, view))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
| <commit_before># Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
<commit_msg>common: Allow to register unnamed django views<commit_after># Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django.conf.urls import url
class UrlsIndex(object):
"""
Simple helper class for centralizing url patterns of a Django
web application.
Derived classes should override the 'scope' class attribute otherwise
all declared patterns will be grouped under the default one.
"""
urlpatterns = {}
scope = 'default'
@classmethod
def add_url_pattern(cls, url_pattern, view, view_name):
"""
Class method that adds an url pattern to the current scope.
Args:
url_pattern: regex describing a Django url
view: function implementing the Django view
view_name: name of the view used to reverse the url
"""
if cls.scope not in cls.urlpatterns:
cls.urlpatterns[cls.scope] = []
if view_name:
cls.urlpatterns[cls.scope].append(url(url_pattern, view,
name=view_name))
else:
cls.urlpatterns[cls.scope].append(url(url_pattern, view))
@classmethod
def get_url_patterns(cls):
"""
Class method that returns the list of url pattern associated to
the current scope.
Returns:
The list of url patterns associated to the current scope
"""
return cls.urlpatterns[cls.scope]
|
ee1e19b023bfed3dfec606bc2bbe08876caf80ef | parse-demo.py | parse-demo.py | #!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(wordtag[0] for wordtag in subtree.leaves()))
| #!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(word for (word, tag) in subtree.leaves()))
| Use better syntax for unpacking tuples | Use better syntax for unpacking tuples
| Python | mit | alexander-bauer/syllabus-summary | #!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(wordtag[0] for wordtag in subtree.leaves()))
Use better syntax for unpacking tuples | #!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(word for (word, tag) in subtree.leaves()))
| <commit_before>#!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(wordtag[0] for wordtag in subtree.leaves()))
<commit_msg>Use better syntax for unpacking tuples<commit_after> | #!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(word for (word, tag) in subtree.leaves()))
| #!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(wordtag[0] for wordtag in subtree.leaves()))
Use better syntax for unpacking tuples#!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(word for (word, tag) in subtree.leaves()))
| <commit_before>#!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(wordtag[0] for wordtag in subtree.leaves()))
<commit_msg>Use better syntax for unpacking tuples<commit_after>#!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
raw_sentences = nltk.sent_tokenize(data)
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in raw_sentences]
# Define a grammar, and identify the noun phrases in the sentences.
# TODO: Look into using exclusive grammars to discard prepositional
# phrases, and such.
chunk_parser = nltk.RegexpParser("""
NP: {<PRP|NN|NNP|CD>+}
NPR: {((<DT|PRP\$>)?<JJ>*(<NP|CC>)+)}
""")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for index, tree in enumerate(trees):
print("===\nSentence: %s\nNoun phrases:" %
raw_sentences[index].replace('\n', ' '))
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NPR'):
print(" %s" % subtree)
print("Key elements:")
for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
print(" %s" % ' '.join(word for (word, tag) in subtree.leaves()))
|
28c8dfd6e3da8525d8379a46244a510db9c34aa5 | pytablewriter/writer/text/_spacealigned.py | pytablewriter/writer/text/_spacealigned.py | import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
| import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.char_cross_point = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
| Fix constructor for SpaceAlignedTableWriter class | Fix constructor for SpaceAlignedTableWriter class
| Python | mit | thombashi/pytablewriter | import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
Fix constructor for SpaceAlignedTableWriter class | import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.char_cross_point = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
| <commit_before>import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
<commit_msg>Fix constructor for SpaceAlignedTableWriter class<commit_after> | import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.char_cross_point = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
| import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
Fix constructor for SpaceAlignedTableWriter classimport copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.char_cross_point = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
| <commit_before>import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
<commit_msg>Fix constructor for SpaceAlignedTableWriter class<commit_after>import copy
import dataproperty
from ._csv import CsvTableWriter
class SpaceAlignedTableWriter(CsvTableWriter):
"""
A table writer class for space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
.. py:method:: write_table
|write_table| with space aligned format.
:Example:
:ref:`example-space-aligned-table-writer`
"""
FORMAT_NAME = "space_aligned"
@property
def format_name(self) -> str:
return self.FORMAT_NAME
def __init__(self) -> None:
super().__init__()
self.column_delimiter = " "
self.char_cross_point = " "
self.is_padding = True
self.is_formatting_float = True
self._quoting_flags = copy.deepcopy(dataproperty.NOT_QUOTING_FLAGS)
|
9d70dc1f82fb807c02f4ccfa04bef7f6da36cbc6 | cluster/context_processors.py | cluster/context_processors.py | from models import Job
def running_jobs(request):
if request.user.is_authenticated():
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
| from models import Job
from interface import get_all_jobs
def running_jobs(request):
if request.user.is_authenticated():
# hack to get numbers to update
get_all_jobs(request.user)
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
| Add a hack so that the number of jobs running will update correctly | Add a hack so that the number of jobs running will update correctly
| Python | mit | crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp | from models import Job
def running_jobs(request):
if request.user.is_authenticated():
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
Add a hack so that the number of jobs running will update correctly | from models import Job
from interface import get_all_jobs
def running_jobs(request):
if request.user.is_authenticated():
# hack to get numbers to update
get_all_jobs(request.user)
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
| <commit_before>from models import Job
def running_jobs(request):
if request.user.is_authenticated():
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
<commit_msg>Add a hack so that the number of jobs running will update correctly<commit_after> | from models import Job
from interface import get_all_jobs
def running_jobs(request):
if request.user.is_authenticated():
# hack to get numbers to update
get_all_jobs(request.user)
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
| from models import Job
def running_jobs(request):
if request.user.is_authenticated():
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
Add a hack so that the number of jobs running will update correctlyfrom models import Job
from interface import get_all_jobs
def running_jobs(request):
if request.user.is_authenticated():
# hack to get numbers to update
get_all_jobs(request.user)
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
| <commit_before>from models import Job
def running_jobs(request):
if request.user.is_authenticated():
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
<commit_msg>Add a hack so that the number of jobs running will update correctly<commit_after>from models import Job
from interface import get_all_jobs
def running_jobs(request):
if request.user.is_authenticated():
# hack to get numbers to update
get_all_jobs(request.user)
temp = len(Job.get_running_jobs(user=request.user))
return {"running_jobs": temp}
else:
return {"running_jobs": None}
|
e46c0f037e88c245f3c4dfa94c155b6a0a58d90a | sunpy/data/setup_package.py | sunpy/data/setup_package.py | def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*.*', '*/*.*']}
| def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*', '*/*']}
| Fix the timeseries tests in the installed package | Fix the timeseries tests in the installed package
| Python | bsd-2-clause | dpshelio/sunpy,dpshelio/sunpy,dpshelio/sunpy | def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*.*', '*/*.*']}
Fix the timeseries tests in the installed package | def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*', '*/*']}
| <commit_before>def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*.*', '*/*.*']}
<commit_msg>Fix the timeseries tests in the installed package<commit_after> | def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*', '*/*']}
| def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*.*', '*/*.*']}
Fix the timeseries tests in the installed packagedef get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*', '*/*']}
| <commit_before>def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*.*', '*/*.*']}
<commit_msg>Fix the timeseries tests in the installed package<commit_after>def get_package_data():
return {'sunpy.data': ['sunpyrc'],
'sunpy.data.test': ['*', '*/*']}
|
9ce80d4b4a27e5a32504c6b00ffcff846c53a649 | froide/publicbody/widgets.py | froide/publicbody/widgets.py | import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super().get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
| import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super(PublicBodySelect, self).get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
| Fix super call for Python 2.7 | Fix super call for Python 2.7 | Python | mit | fin/froide,stefanw/froide,stefanw/froide,fin/froide,stefanw/froide,fin/froide,stefanw/froide,fin/froide,stefanw/froide | import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super().get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
Fix super call for Python 2.7 | import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super(PublicBodySelect, self).get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
| <commit_before>import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super().get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
<commit_msg>Fix super call for Python 2.7<commit_after> | import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super(PublicBodySelect, self).get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
| import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super().get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
Fix super call for Python 2.7import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super(PublicBodySelect, self).get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
| <commit_before>import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super().get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
<commit_msg>Fix super call for Python 2.7<commit_after>import json
from django import forms
from .models import PublicBody
class PublicBodySelect(forms.Widget):
input_type = "text"
template_name = 'publicbody/_chooser.html'
initial_search = None
class Media:
extend = False
js = ('js/publicbody.js',)
def set_initial_search(self, search):
self.initial_search = search
def get_context(self, name, value=None, attrs=None):
pb, pb_desc = None, None
if value is not None:
try:
pb = PublicBody.objects.get(pk=int(value))
pb_desc = pb.get_label()
except (ValueError, PublicBody.DoesNotExist):
pass
context = super(PublicBodySelect, self).get_context(name, value, attrs)
context['widget'].update({
'value_label': pb_desc,
'search': self.initial_search,
'publicbody': pb,
'json': json.dumps({
'fields': {
name: {
'value': value,
'objects': pb.as_data() if pb is not None else None
}
}
})
})
return context
|
fa01bae61830e501e62997f456f9533b654eb425 | utils.py | utils.py | import numpy as np
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
| import numpy as np
from sklearn import cross_validation
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
def train_test_split(X, y, test_size=0.2):
data = cross_validation.train_test_split(X, y, test_size=test_size)
training = data[0], data[2]
test = data[1], data[3]
return training, test
| Add data splitter for cross validation | Add data splitter for cross validation
| Python | mit | IshitaTakeshi/SCW | import numpy as np
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
Add data splitter for cross validation | import numpy as np
from sklearn import cross_validation
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
def train_test_split(X, y, test_size=0.2):
data = cross_validation.train_test_split(X, y, test_size=test_size)
training = data[0], data[2]
test = data[1], data[3]
return training, test
| <commit_before>import numpy as np
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
<commit_msg>Add data splitter for cross validation<commit_after> | import numpy as np
from sklearn import cross_validation
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
def train_test_split(X, y, test_size=0.2):
data = cross_validation.train_test_split(X, y, test_size=test_size)
training = data[0], data[2]
test = data[1], data[3]
return training, test
| import numpy as np
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
Add data splitter for cross validationimport numpy as np
from sklearn import cross_validation
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
def train_test_split(X, y, test_size=0.2):
data = cross_validation.train_test_split(X, y, test_size=test_size)
training = data[0], data[2]
test = data[1], data[3]
return training, test
| <commit_before>import numpy as np
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
<commit_msg>Add data splitter for cross validation<commit_after>import numpy as np
from sklearn import cross_validation
def overwrite_labels(y):
classes = np.unique(y)
y[y==classes[0]] = -1
y[y==classes[1]] = 1
return y
def train_test_split(X, y, test_size=0.2):
data = cross_validation.train_test_split(X, y, test_size=test_size)
training = data[0], data[2]
test = data[1], data[3]
return training, test
|
62edeb51184758d29be07fbee6deaed0f931760f | word-count/word_count.py | word-count/word_count.py | def word_count(s):
words = strip_punc(s.lower()).split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
| def word_count(s):
words = strip_punc(s).lower().split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
| Move .lower() method call for readability | Move .lower() method call for readability
| Python | agpl-3.0 | CubicComet/exercism-python-solutions | def word_count(s):
words = strip_punc(s.lower()).split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
Move .lower() method call for readability | def word_count(s):
words = strip_punc(s).lower().split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
| <commit_before>def word_count(s):
words = strip_punc(s.lower()).split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
<commit_msg>Move .lower() method call for readability<commit_after> | def word_count(s):
words = strip_punc(s).lower().split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
| def word_count(s):
words = strip_punc(s.lower()).split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
Move .lower() method call for readabilitydef word_count(s):
words = strip_punc(s).lower().split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
| <commit_before>def word_count(s):
words = strip_punc(s.lower()).split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
<commit_msg>Move .lower() method call for readability<commit_after>def word_count(s):
words = strip_punc(s).lower().split()
return {word: words.count(word) for word in set(words)}
def strip_punc(s):
return "".join(ch if ch.isalnum() else " " for ch in s)
|
86d4aa3e5895d5f7ac029df82c63e2b1e29e8c2d | spc/types.py | spc/types.py | """
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
| """
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block.
#
# Also, fields is an OrderedDict, since the order of fields matters for layout,
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
| Add note that Struct's field collection is an OrderedDict | Add note that Struct's field collection is an OrderedDict
| Python | mit | adamnew123456/spc,adamnew123456/spc | """
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
Add note that Struct's field collection is an OrderedDict | """
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block.
#
# Also, fields is an OrderedDict, since the order of fields matters for layout,
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
| <commit_before>"""
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
<commit_msg>Add note that Struct's field collection is an OrderedDict<commit_after> | """
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block.
#
# Also, fields is an OrderedDict, since the order of fields matters for layout,
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
| """
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
Add note that Struct's field collection is an OrderedDict"""
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block.
#
# Also, fields is an OrderedDict, since the order of fields matters for layout,
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
| <commit_before>"""
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
<commit_msg>Add note that Struct's field collection is an OrderedDict<commit_after>"""
All the different types that the compiler handles.
"""
from collections import namedtuple
IntegerType = namedtuple('IntegerType', [])
Integer = IntegerType()
ByteType = namedtuple('ByteType', [])
Byte = ByteType()
PointerTo = namedtuple('PointerTo', ['type'])
ArrayOf = namedtuple('ArrayOf', ['type', 'count'])
FunctionPointer = namedtuple('FunctionPointer', ['return_type', 'params'])
TypeName = namedtuple('TypeName', ['name'])
# Structure is a bit of an oddity - it can't actually be used in 'raw form'
# by the user, but is always aliased in a declare block.
#
# Also, fields is an OrderedDict, since the order of fields matters for layout,
Struct = namedtuple('Struct', ['fields'])
# This is used merely to record that a function has been declared - the
# actual reified type is FunctionPointer
FunctionDecl = namedtuple('FunctionDecl', ['return_type', 'params'])
AliasDef = namedtuple('AliasDef', ['type'])
# Raw types are types which can be used as variables
RAW_TYPES = (types.IntegerType, types.ByteType, types.TypeName,
types.PointerTo, types.ArrayOf, types.FunctionPointer)
def decay_if_array(type_obj):
"""
Decays arrays types into pointers.
"""
if isinstance(type_obj, types.ArrayOf):
return type_obj.PointerTo(type_obj.type)
else:
return type_obj
def func_decl_to_ptr(func_decl):
"""
Converts a function declaration to a pointer.
"""
return FunctionPointer(*func_decl)
|
1ab00dba5c52b90d3c54ab7832abe7bd785575b4 | bdo_platform/settings_management/development_sskalidakis.py | bdo_platform/settings_management/development_sskalidakis.py | from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_platform',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True | from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_local_database',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True | Change in database configuration file | Change in database configuration file
| Python | mit | dipapaspyros/bdo_platform,dipapaspyros/bdo_platform,dipapaspyros/bdo_platform | from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_platform',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = TrueChange in database configuration file | from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_local_database',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True | <commit_before>from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_platform',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True<commit_msg>Change in database configuration file<commit_after> | from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_local_database',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True | from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_platform',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = TrueChange in database configuration filefrom bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_local_database',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True | <commit_before>from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_platform',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True<commit_msg>Change in database configuration file<commit_after>from bdo_platform.settings_management.development import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdo_local_database',
'USER': 'postgres',
'PASSWORD': 'sssshmmy',
'HOST': 'localhost',
'PORT': '5432',
},
'UBITECH_POSTGRES': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bigdataocean',
'USER': 'bdo',
'PASSWORD': 'df195715HBdhahfP',
'HOST': '212.101.173.21',
'PORT': '5432',
}
}
SPARK_SUBMIT_PATH = ''
#
# # dev server URL
# SERVER_URL = 'http://127.0.0.1:8000'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True |
f75d30c7c6fe56bb3e4b97cbdc9ff9deba6e211e | astroquery/open_exoplanet_catalogue/tests/test_open_exoplanet_catalogue_local.py | astroquery/open_exoplanet_catalogue/tests/test_open_exoplanet_catalogue_local.py | import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open(data_path('systems.xml.gz'), "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
| import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open('data/systems.xml.gz', "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
| Fix to local data filepath | Fix to local data filepath
| Python | bsd-3-clause | ceb8/astroquery,imbasimba/astroquery,imbasimba/astroquery,ceb8/astroquery | import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open(data_path('systems.xml.gz'), "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
Fix to local data filepath | import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open('data/systems.xml.gz', "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
| <commit_before>import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open(data_path('systems.xml.gz'), "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
<commit_msg>Fix to local data filepath<commit_after> | import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open('data/systems.xml.gz', "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
| import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open(data_path('systems.xml.gz'), "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
Fix to local data filepathimport os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open('data/systems.xml.gz', "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
| <commit_before>import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open(data_path('systems.xml.gz'), "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
<commit_msg>Fix to local data filepath<commit_after>import os
import urllib
from xml.etree import ElementTree as ET
from astropy.tests.helper import pytest
from ...utils.testing_tools import MockResponse
from ... import open_exoplanet_catalogue as oec
@pytest.fixture(autouse=True)
def patch_urlopen(request):
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(urllib, 'urlopen', get_mock_return)
return mp
def get_mock_return(url, params=None, timeout=10,**kwargs):
# dummy function to replace urllib get functionality
# function returns what the http request would but with local data
content = open('data/systems.xml.gz', "r").read()
return MockResponse(content, **kwargs)
# get file path of a static data file for testing
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
def test_function(patch_urlopen):
cata = oec.get_catalogue()
assert len(cata.findall('.//planet')) > 0
kepler67b = cata.find(".//planet[name='Kepler-67 b']")
assert kepler67b.findtext('name') == "Kepler-67 b"
assert kepler67b.findtext('discoverymethod') == "transit"
kepler67 = cata.find(".//system[name='Kepler-67']")
assert kepler67.findvalue('distance') == 1107
|
6dea77fa9693a4e6c934e92d56c3fb43fc13bb13 | pronto_praise/pronto_praise/urls.py | pronto_praise/pronto_praise/urls.py | """pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^praises/', include('praises.urls')),
]
| """pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('praises.urls')),
]
| Set praise list as home | Set praise list as home
| Python | mit | prontotools/pronto-praise,prontotools/pronto-praise,prontotools/pronto-praise,prontotools/pronto-praise | """pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^praises/', include('praises.urls')),
]
Set praise list as home | """pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('praises.urls')),
]
| <commit_before>"""pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^praises/', include('praises.urls')),
]
<commit_msg>Set praise list as home<commit_after> | """pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('praises.urls')),
]
| """pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^praises/', include('praises.urls')),
]
Set praise list as home"""pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('praises.urls')),
]
| <commit_before>"""pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^praises/', include('praises.urls')),
]
<commit_msg>Set praise list as home<commit_after>"""pronto_praise URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('praises.urls')),
]
|
4dceb440069d63133bffe928b5c8aa756574a41c | lowfat/validator.py | lowfat/validator.py | """
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
| """
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
elif exception.code == 403:
req = request.Request(url, headers={'User-Agent' : "lowFAT"})
online_resource = request.urlopen(req)
else:
raise ValidationError("Error! HTTP status code is {}.".format(exception.code))
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
| Fix problem with sites that blocks bots | Fix problem with sites that blocks bots
| Python | bsd-3-clause | softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat | """
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
Fix problem with sites that blocks bots | """
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
elif exception.code == 403:
req = request.Request(url, headers={'User-Agent' : "lowFAT"})
online_resource = request.urlopen(req)
else:
raise ValidationError("Error! HTTP status code is {}.".format(exception.code))
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
| <commit_before>"""
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
<commit_msg>Fix problem with sites that blocks bots<commit_after> | """
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
elif exception.code == 403:
req = request.Request(url, headers={'User-Agent' : "lowFAT"})
online_resource = request.urlopen(req)
else:
raise ValidationError("Error! HTTP status code is {}.".format(exception.code))
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
| """
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
Fix problem with sites that blocks bots"""
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
elif exception.code == 403:
req = request.Request(url, headers={'User-Agent' : "lowFAT"})
online_resource = request.urlopen(req)
else:
raise ValidationError("Error! HTTP status code is {}.".format(exception.code))
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
| <commit_before>"""
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
<commit_msg>Fix problem with sites that blocks bots<commit_after>"""
Validator functions
"""
from urllib import request
from urllib.error import HTTPError
from django.core.exceptions import ValidationError
import PyPDF2
def online_document(url):
"""Check if online document is available."""
try:
online_resource = request.urlopen(url)
except HTTPError as exception:
if exception.code == 410:
raise ValidationError("Online document was removed.") # This is the code returned by Google Drive
elif exception.code == 403:
req = request.Request(url, headers={'User-Agent' : "lowFAT"})
online_resource = request.urlopen(req)
else:
raise ValidationError("Error! HTTP status code is {}.".format(exception.code))
# Need to test if website didn't redirect the request to another resource.
if url != online_resource.geturl() or online_resource.getcode() != 200:
raise ValidationError("Can't access online document.")
def pdf(value):
"""Check if filename looks like a PDF file."""
filename = value.name.lower()
if not filename.endswith(".pdf"):
raise ValidationError("File name doesn't look to be a PDF file.")
try:
pdf_file = PyPDF2.PdfFileReader(value.file) # pylint: disable=unused-variable
except:
raise ValidationError("File doesn't look to be a PDF file.")
|
02a975356d6a6b36cc565e8f4b771497867f09dd | tests/test_factorization.py | tests/test_factorization.py | import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
| import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
def test_pollard_rho_x_is_zero(self):
x = 0
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
| Add test case for Pollard's Rho at zero to bump test coverage | Add test case for Pollard's Rho at zero to bump test coverage
| Python | bsd-3-clause | stphivos/algorithms | import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
Add test case for Pollard's Rho at zero to bump test coverage | import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
def test_pollard_rho_x_is_zero(self):
x = 0
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
| <commit_before>import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
<commit_msg>Add test case for Pollard's Rho at zero to bump test coverage<commit_after> | import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
def test_pollard_rho_x_is_zero(self):
x = 0
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
| import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
Add test case for Pollard's Rho at zero to bump test coverageimport random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
def test_pollard_rho_x_is_zero(self):
x = 0
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
| <commit_before>import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
<commit_msg>Add test case for Pollard's Rho at zero to bump test coverage<commit_after>import random
import unittest
from algorithms.factorization.pollard_rho import pollard_rho
from algorithms.factorization.trial_division import trial_division
from algorithms.factorization.fermat import fermat
class TestFermat(unittest.TestCase):
def test_fermat(self):
x = random.randint(1, 100000000)
factors = fermat(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
class TestPollardRho(unittest.TestCase):
def test_pollard_rho(self):
x = random.randint(1, 100000000000)
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
def test_pollard_rho_x_is_zero(self):
x = 0
factors = pollard_rho(x)
res = 1
for j in factors:
res *= j
self.assertEqual(x, res)
class TestTrialDivision(unittest.TestCase):
def test_trial_division(self):
x = random.randint(0, 10000000000)
factors = trial_division(x)
res = 1
for i in factors:
res *= i
self.assertEqual(x, res)
|
d5d33f9fb77fd0d9bb4410971e0acd54b8cbf084 | latest_tweets/management/commands/latest_tweets_update.py | latest_tweets/management/commands/latest_tweets_update.py | from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from ..models import Tweet
from ..utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
| from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from latest_tweets.models import Tweet
from latest_tweets.utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
| Fix imports for management command | Fix imports for management command
| Python | bsd-3-clause | blancltd/django-latest-tweets | from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from ..models import Tweet
from ..utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
Fix imports for management command | from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from latest_tweets.models import Tweet
from latest_tweets.utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
| <commit_before>from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from ..models import Tweet
from ..utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
<commit_msg>Fix imports for management command<commit_after> | from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from latest_tweets.models import Tweet
from latest_tweets.utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
| from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from ..models import Tweet
from ..utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
Fix imports for management commandfrom django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from latest_tweets.models import Tweet
from latest_tweets.utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
| <commit_before>from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from ..models import Tweet
from ..utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
<commit_msg>Fix imports for management command<commit_after>from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import transaction
from twitter import OAuth, Twitter
from latest_tweets.models import Tweet
from latest_tweets.utils import update_tweets
@transaction.atomic
def update_user(user):
t = Twitter(auth=OAuth(
settings.TWITTER_OAUTH_TOKEN,
settings.TWITTER_OAUTH_SECRET,
settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET
))
messages = t.statuses.user_timeline(screen_name=user, include_rts=True)
tweet_list = update_tweets(messages=messages)
# To ensure we delete any deleted tweets
oldest_date = None
tweet_id_list = []
for i in tweet_list:
# Help prune out deleted tweets
if not oldest_date or i.created < oldest_date:
oldest_date = i.created
tweet_id_list.append(i.id)
# Remove any deleted tweets in our date range
Tweet.objects.filter(user=user, created__gt=oldest_date).exclude(id__in=tweet_id_list).delete()
class Command(BaseCommand):
args = 'user [user ...]'
def handle(self, *args, **options):
for i in args:
update_user(i)
|
fc8672f3fc8f70f570f3a85fccf5625d3f514c12 | osrframework/__init__.py | osrframework/__init__.py | # -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0b"
| # -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0"
| Prepare 0.17.0 Vegas for BlackHat Arsenal | Prepare 0.17.0 Vegas for BlackHat Arsenal
| Python | agpl-3.0 | i3visio/osrframework | # -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0b"
Prepare 0.17.0 Vegas for BlackHat Arsenal | # -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0"
| <commit_before># -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0b"
<commit_msg>Prepare 0.17.0 Vegas for BlackHat Arsenal<commit_after> | # -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0"
| # -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0b"
Prepare 0.17.0 Vegas for BlackHat Arsenal# -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0"
| <commit_before># -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0b"
<commit_msg>Prepare 0.17.0 Vegas for BlackHat Arsenal<commit_after># -*- coding: utf-8 -*-
#
##################################################################################
#
# Copyright 2014-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# OSRFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import osrframework.utils.logger
# Calling the logger when being imported
osrframework.utils.logger.setupLogger(loggerName="osrframework")
__version__="0.17.0"
|
6c6f6ec6c5a895f083ff8c9b9a0d76791bb13ce9 | app/eve_api/tasks/static.py | app/eve_api/tasks/static.py | from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
| from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created or not gobj.name or not gobj.name == group['groupName']:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group or not skillobj.name == skill['typeName']:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
| Support if skill group/types are changed | Support if skill group/types are changed
| Python | bsd-3-clause | nikdoof/test-auth | from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
Support if skill group/types are changed | from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created or not gobj.name or not gobj.name == group['groupName']:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group or not skillobj.name == skill['typeName']:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
| <commit_before>from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
<commit_msg>Support if skill group/types are changed<commit_after> | from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created or not gobj.name or not gobj.name == group['groupName']:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group or not skillobj.name == skill['typeName']:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
| from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
Support if skill group/types are changedfrom celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created or not gobj.name or not gobj.name == group['groupName']:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group or not skillobj.name == skill['typeName']:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
| <commit_before>from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
<commit_msg>Support if skill group/types are changed<commit_after>from celery.decorators import task
from eve_proxy.models import CachedDocument
from eve_api.utils import basic_xml_parse_doc
from eve_api.models import EVESkill, EVESkillGroup
@task()
def import_eve_skills():
"""
Imports the skill tree and groups
"""
char_doc = CachedDocument.objects.api_query('/eve/SkillTree.xml.aspx')
d = basic_xml_parse_doc(char_doc)['eveapi']
if 'error' in d:
return
values = d['result']
for group in values['skillGroups']:
gobj, created = EVESkillGroup.objects.get_or_create(id=group['groupID'])
if created or not gobj.name or not gobj.name == group['groupName']:
gobj.name = group['groupName']
gobj.save()
for skill in group['skills']:
skillobj, created = EVESkill.objects.get_or_create(id=skill['typeID'])
if created or not skillobj.name or not skillobj.group or not skillobj.name == skill['typeName']:
skillobj.name = skill['typeName']
skillobj.group = gobj
skillobj.save()
|
f94110a91db9f0e52209e470b6ed8c4b4b3fe30c | common/helpers.py | common/helpers.py | from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
| from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
# Turn a single email into a list of one element
if isinstance(to_list, str) or isinstance(to_list, unicode):
to_list = [to_list, ]
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
| Allow a single email as the to email field | Allow a single email as the to email field
| Python | mit | Socialsquare/RunningCause,Socialsquare/RunningCause,Socialsquare/RunningCause,Socialsquare/RunningCause | from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
Allow a single email as the to email field | from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
# Turn a single email into a list of one element
if isinstance(to_list, str) or isinstance(to_list, unicode):
to_list = [to_list, ]
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
| <commit_before>from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
<commit_msg>Allow a single email as the to email field<commit_after> | from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
# Turn a single email into a list of one element
if isinstance(to_list, str) or isinstance(to_list, unicode):
to_list = [to_list, ]
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
| from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
Allow a single email as the to email fieldfrom django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
# Turn a single email into a list of one element
if isinstance(to_list, str) or isinstance(to_list, unicode):
to_list = [to_list, ]
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
| <commit_before>from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
<commit_msg>Allow a single email as the to email field<commit_after>from django.core.mail import EmailMessage
from django.template import loader, Context
from django.conf import settings
def send_email(to_list, subject, message_template, message_context):
message_context.update({
'BASE_URL': settings.BASE_URL
})
# Turn a single email into a list of one element
if isinstance(to_list, str) or isinstance(to_list, unicode):
to_list = [to_list, ]
context = Context(message_context)
message = loader.get_template(message_template).render(context)
sender = settings.DEFAULT_FROM_EMAIL
msg = EmailMessage(subject, message, sender, to_list)
msg.content_subtype = "html"
return msg.send(fail_silently=True)
|
94cae9c13ac90a7de50cfaf998b9b423e7a2eaf1 | csunplugged/resources/utils/resource_valid_configurations.py | csunplugged/resources/utils/resource_valid_configurations.py | """Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, value) in valid_options.items():
if isinstance(value, bool):
valid_options[key] = bool_to_yes_no(value)
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
| """Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, values) in valid_options.items():
for i in range(0, len(values)):
if isinstance(values[i], bool):
values[i] = bool_to_yes_no(values[i])
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
| Fix bug where boolean combination values were not changed to strings | Fix bug where boolean combination values were not changed to strings
| Python | mit | uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged | """Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, value) in valid_options.items():
if isinstance(value, bool):
valid_options[key] = bool_to_yes_no(value)
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
Fix bug where boolean combination values were not changed to strings | """Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, values) in valid_options.items():
for i in range(0, len(values)):
if isinstance(values[i], bool):
values[i] = bool_to_yes_no(values[i])
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
| <commit_before>"""Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, value) in valid_options.items():
if isinstance(value, bool):
valid_options[key] = bool_to_yes_no(value)
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
<commit_msg>Fix bug where boolean combination values were not changed to strings<commit_after> | """Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, values) in valid_options.items():
for i in range(0, len(values)):
if isinstance(values[i], bool):
values[i] = bool_to_yes_no(values[i])
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
| """Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, value) in valid_options.items():
if isinstance(value, bool):
valid_options[key] = bool_to_yes_no(value)
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
Fix bug where boolean combination values were not changed to strings"""Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, values) in valid_options.items():
for i in range(0, len(values)):
if isinstance(values[i], bool):
values[i] = bool_to_yes_no(values[i])
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
| <commit_before>"""Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, value) in valid_options.items():
if isinstance(value, bool):
valid_options[key] = bool_to_yes_no(value)
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
<commit_msg>Fix bug where boolean combination values were not changed to strings<commit_after>"""Create list of all possible valid resource combinations."""
import itertools
from utils.bool_to_yes_no import bool_to_yes_no
def resource_valid_configurations(valid_options, header_text=True):
"""Return list of all possible valid resource combinations.
Args:
valid_options: A dictionary containing all valid resource generation
options (dict).
header_text: If true, add in valid options for header text (bool).
Returns:
List of dictionaries of valid combinations (list).
"""
if header_text:
valid_options["header_text"] = ["", "Example header"]
# Change all booleans to text to mimic forms
for (key, values) in valid_options.items():
for i in range(0, len(values)):
if isinstance(values[i], bool):
values[i] = bool_to_yes_no(values[i])
valid_option_keys = sorted(valid_options)
return [dict(zip(valid_option_keys, product)) for product in itertools.product(
*(valid_options[valid_option_key] for valid_option_key in valid_option_keys)
)]
|
61cf3a2f99c01d5da0d75a5ff6b0b2c4cac83487 | plugins/random/plugin.py | plugins/random/plugin.py | import random
from cardinal.decorators import command
class RandomPlugin:
    """IRC plugin that rolls dice of the requested sizes."""

    @command('roll')
    def roll(self, cardinal, user, channel, msg):
        """Handle ``.roll`` by parsing dice specs (``20`` or ``d20``).

        Args:
            cardinal: Bot instance used to send the reply.
            user: User who issued the command (unused).
            channel: Channel the reply is sent to.
            msg: Full command text, e.g. ``.roll d6 d20``.
        """
        args = msg.split(' ')
        args.pop(0)  # drop the command name itself

        dice = []
        for arg in args:
            # Guard against empty tokens (e.g. from consecutive spaces):
            # int('') raises ValueError and arg[0] would then raise an
            # uncaught IndexError.
            if not arg:
                continue
            try:
                sides = int(arg)
                dice.append(sides)
            except (TypeError, ValueError):
                if arg[0] != 'd':
                    continue

                try:
                    sides = int(arg[1:])
                    dice.append(sides)
                except (TypeError, ValueError):
                    pass

        results = []
        limit = 10
        for sides in dice:
            if sides < 2 or sides > 120:
                continue

            limit -= 1
            # Don't allow more than ten dice rolled at a time
            if limit < 0:
                break

            results.append((sides, random.randint(1, sides)))

        messages = ', '.join(
            [f"d{sides}: {result}" for sides, result in results]
        )
        cardinal.sendMsg(channel, messages)


entrypoint = RandomPlugin
| import random
import re
from cardinal.decorators import command
def parse_roll(arg):
    """Parse one dice token into a list of die sizes.

    Accepts ``NdS`` (N dice with S sides each), ``dS``, or a bare ``S``.

    Args:
        arg: A single whitespace-separated token from the command.

    Returns:
        A list with one entry per die (the number of sides), or an empty
        list if the token is not a recognized dice spec.
    """
    # some people might separate with commas
    arg = arg.rstrip(',')

    if match := re.match(r'^(\d+)?d(\d+)$', arg):
        # The dice count is optional ("d20" means "1d20"); group(1) is None
        # when omitted, so default to 1 rather than calling int(None).
        num_dice = match.group(1) or 1
        sides = match.group(2)
    elif match := re.match(r'^d?(\d+)$', arg):
        num_dice = 1
        sides = match.group(1)
    else:
        return []

    return [int(sides)] * int(num_dice)
class RandomPlugin:
    """IRC plugin that rolls every die requested via ``.roll``."""

    @command('roll')
    def roll(self, cardinal, user, channel, msg):
        """Roll the dice specs found in ``msg`` and report each result."""
        tokens = msg.split(' ')
        tokens.pop(0)  # discard the command name

        requested = []
        for token in tokens:
            requested.extend(parse_roll(token))

        rolls = []
        remaining = 10
        for sides in requested:
            if not 2 <= sides <= 120:
                continue

            remaining -= 1
            # Don't allow more than ten dice rolled at a time
            if remaining < 0:
                break

            rolls.append((sides, random.randint(1, sides)))

        summary = ', '.join(
            f"d{sides}: {result}" for sides, result in rolls
        )
        cardinal.sendMsg(channel, summary)


entrypoint = RandomPlugin
| Add support for 5d20 instead of d20 | Add support for 5d20 instead of d20
| Python | mit | JohnMaguire/Cardinal | import random
from cardinal.decorators import command
class RandomPlugin:
@command('roll')
def roll(self, cardinal, user, channel, msg):
args = msg.split(' ')
args.pop(0)
dice = []
for arg in args:
try:
sides = int(arg)
dice.append(sides)
except (TypeError, ValueError):
if arg[0] != 'd':
continue
try:
sides = int(arg[1:])
dice.append(sides)
except (TypeError, ValueError):
pass
results = []
limit = 10
for sides in dice:
if sides < 2 or sides > 120:
continue
limit -= 1
# Don't allow more than ten dice rolled at a time
if limit < 0:
break
results.append((sides, random.randint(1, sides)))
messages = ', '.join(
[f"d{sides}: {result}" for sides, result in results]
)
cardinal.sendMsg(channel, messages)
entrypoint = RandomPlugin
Add support for 5d20 instead of d20 | import random
import re
from cardinal.decorators import command
def parse_roll(arg):
# some people might separate with commas
arg = arg.rstrip(',')
if match := re.match(r'^(\d+)?d(\d+)$', arg):
num_dice = match.group(1)
sides = match.group(2)
elif match := re.match(r'^d?(\d+)$', arg):
num_dice = 1
sides = match.group(1)
else:
return []
return [int(sides)] * int(num_dice)
class RandomPlugin:
@command('roll')
def roll(self, cardinal, user, channel, msg):
args = msg.split(' ')
args.pop(0)
dice = []
for arg in args:
dice = dice + parse_roll(arg)
results = []
limit = 10
for sides in dice:
if sides < 2 or sides > 120:
continue
limit -= 1
# Don't allow more than ten dice rolled at a time
if limit < 0:
break
results.append((sides, random.randint(1, sides)))
messages = ', '.join(
[f"d{sides}: {result}" for sides, result in results]
)
cardinal.sendMsg(channel, messages)
entrypoint = RandomPlugin
| <commit_before>import random
from cardinal.decorators import command
class RandomPlugin:
@command('roll')
def roll(self, cardinal, user, channel, msg):
args = msg.split(' ')
args.pop(0)
dice = []
for arg in args:
try:
sides = int(arg)
dice.append(sides)
except (TypeError, ValueError):
if arg[0] != 'd':
continue
try:
sides = int(arg[1:])
dice.append(sides)
except (TypeError, ValueError):
pass
results = []
limit = 10
for sides in dice:
if sides < 2 or sides > 120:
continue
limit -= 1
# Don't allow more than ten dice rolled at a time
if limit < 0:
break
results.append((sides, random.randint(1, sides)))
messages = ', '.join(
[f"d{sides}: {result}" for sides, result in results]
)
cardinal.sendMsg(channel, messages)
entrypoint = RandomPlugin
<commit_msg>Add support for 5d20 instead of d20<commit_after> | import random
import re
from cardinal.decorators import command
def parse_roll(arg):
# some people might separate with commas
arg = arg.rstrip(',')
if match := re.match(r'^(\d+)?d(\d+)$', arg):
num_dice = match.group(1)
sides = match.group(2)
elif match := re.match(r'^d?(\d+)$', arg):
num_dice = 1
sides = match.group(1)
else:
return []
return [int(sides)] * int(num_dice)
class RandomPlugin:
@command('roll')
def roll(self, cardinal, user, channel, msg):
args = msg.split(' ')
args.pop(0)
dice = []
for arg in args:
dice = dice + parse_roll(arg)
results = []
limit = 10
for sides in dice:
if sides < 2 or sides > 120:
continue
limit -= 1
# Don't allow more than ten dice rolled at a time
if limit < 0:
break
results.append((sides, random.randint(1, sides)))
messages = ', '.join(
[f"d{sides}: {result}" for sides, result in results]
)
cardinal.sendMsg(channel, messages)
entrypoint = RandomPlugin
| import random
from cardinal.decorators import command
class RandomPlugin:
@command('roll')
def roll(self, cardinal, user, channel, msg):
args = msg.split(' ')
args.pop(0)
dice = []
for arg in args:
try:
sides = int(arg)
dice.append(sides)
except (TypeError, ValueError):
if arg[0] != 'd':
continue
try:
sides = int(arg[1:])
dice.append(sides)
except (TypeError, ValueError):
pass
results = []
limit = 10
for sides in dice:
if sides < 2 or sides > 120:
continue
limit -= 1
# Don't allow more than ten dice rolled at a time
if limit < 0:
break
results.append((sides, random.randint(1, sides)))
messages = ', '.join(
[f"d{sides}: {result}" for sides, result in results]
)
cardinal.sendMsg(channel, messages)
entrypoint = RandomPlugin
Add support for 5d20 instead of d20import random
import re
from cardinal.decorators import command
def parse_roll(arg):
# some people might separate with commas
arg = arg.rstrip(',')
if match := re.match(r'^(\d+)?d(\d+)$', arg):
num_dice = match.group(1)
sides = match.group(2)
elif match := re.match(r'^d?(\d+)$', arg):
num_dice = 1
sides = match.group(1)
else:
return []
return [int(sides)] * int(num_dice)
class RandomPlugin:
@command('roll')
def roll(self, cardinal, user, channel, msg):
args = msg.split(' ')
args.pop(0)
dice = []
for arg in args:
dice = dice + parse_roll(arg)
results = []
limit = 10
for sides in dice:
if sides < 2 or sides > 120:
continue
limit -= 1
# Don't allow more than ten dice rolled at a time
if limit < 0:
break
results.append((sides, random.randint(1, sides)))
messages = ', '.join(
[f"d{sides}: {result}" for sides, result in results]
)
cardinal.sendMsg(channel, messages)
entrypoint = RandomPlugin
| <commit_before>import random
from cardinal.decorators import command
class RandomPlugin:
@command('roll')
def roll(self, cardinal, user, channel, msg):
args = msg.split(' ')
args.pop(0)
dice = []
for arg in args:
try:
sides = int(arg)
dice.append(sides)
except (TypeError, ValueError):
if arg[0] != 'd':
continue
try:
sides = int(arg[1:])
dice.append(sides)
except (TypeError, ValueError):
pass
results = []
limit = 10
for sides in dice:
if sides < 2 or sides > 120:
continue
limit -= 1
# Don't allow more than ten dice rolled at a time
if limit < 0:
break
results.append((sides, random.randint(1, sides)))
messages = ', '.join(
[f"d{sides}: {result}" for sides, result in results]
)
cardinal.sendMsg(channel, messages)
entrypoint = RandomPlugin
<commit_msg>Add support for 5d20 instead of d20<commit_after>import random
import re
from cardinal.decorators import command
def parse_roll(arg):
# some people might separate with commas
arg = arg.rstrip(',')
if match := re.match(r'^(\d+)?d(\d+)$', arg):
num_dice = match.group(1)
sides = match.group(2)
elif match := re.match(r'^d?(\d+)$', arg):
num_dice = 1
sides = match.group(1)
else:
return []
return [int(sides)] * int(num_dice)
class RandomPlugin:
@command('roll')
def roll(self, cardinal, user, channel, msg):
args = msg.split(' ')
args.pop(0)
dice = []
for arg in args:
dice = dice + parse_roll(arg)
results = []
limit = 10
for sides in dice:
if sides < 2 or sides > 120:
continue
limit -= 1
# Don't allow more than ten dice rolled at a time
if limit < 0:
break
results.append((sides, random.randint(1, sides)))
messages = ', '.join(
[f"d{sides}: {result}" for sides, result in results]
)
cardinal.sendMsg(channel, messages)
entrypoint = RandomPlugin
|
86ff9a791290455cd169cca7697587a2ad9f350b | services/flickr.py | services/flickr.py | import foauth.providers
class Flickr(foauth.providers.OAuth1):
    """Flickr photo service, authenticated via OAuth 1."""

    # General info about the provider
    provider_url = 'http://www.flickr.com/'
    docs_url = 'http://www.flickr.com/services/api/'
    category = 'Pictures'

    # URLs to interact with the API
    request_token_url = 'http://www.flickr.com/services/oauth/request_token'
    authorize_url = 'http://www.flickr.com/services/oauth/authorize'
    access_token_url = 'http://www.flickr.com/services/oauth/access_token'
    api_domain = 'api.flickr.com'

    # Only the broadest level is currently enabled; the narrower levels
    # below are kept for reference.
    available_permissions = [
        # (None, 'access only your public photos'),
        # ('read', 'access your public and private photos'),
        # ('write', 'upload, edit and replace your photos'),
        ('delete', 'upload, edit, replace and delete your photos'),
    ]

    https = False

    def get_authorize_params(self, redirect_uri):
        """Add Flickr's ``perms`` argument to the standard OAuth params."""
        authorize_params = super(Flickr, self).get_authorize_params(redirect_uri)
        # Exactly one permission level is enabled, so request it directly.
        authorize_params['perms'] = self.available_permissions[0][0]
        return authorize_params

    def get_user_id(self, key):
        """Look up the authenticated user's NSID via the REST API."""
        endpoint = u'/services/rest/?method=flickr.people.getLimits'
        endpoint += u'&format=json&nojsoncallback=1'
        response = self.api(key, self.api_domain, endpoint)
        return response.json[u'person'][u'nsid']
| import foauth.providers
class Flickr(foauth.providers.OAuth1):
    """Flickr photo service, authenticated via OAuth 1 with scope selection."""

    # General info about the provider
    provider_url = 'http://www.flickr.com/'
    docs_url = 'http://www.flickr.com/services/api/'
    category = 'Pictures'

    # URLs to interact with the API
    request_token_url = 'http://www.flickr.com/services/oauth/request_token'
    authorize_url = 'http://www.flickr.com/services/oauth/authorize'
    access_token_url = 'http://www.flickr.com/services/oauth/access_token'
    api_domain = 'api.flickr.com'

    # Each entry pairs a Flickr perms value with its user-facing label;
    # None means public-only access (no perms argument is sent at all).
    available_permissions = [
        (None, 'access only your public photos'),
        ('read', 'access your public and private photos'),
        ('write', 'upload, edit and replace your photos'),
        ('delete', 'upload, edit, replace and delete your photos'),
    ]
    # Render the permission choices as mutually exclusive options
    # (presumably consumed by the framework's scope form — confirm).
    permissions_widget = 'radio'

    https = False

    def get_authorize_params(self, redirect_uri, scopes):
        # Flickr expects a single 'perms' query argument.  A selection of
        # [None] (public-only) is falsy under any(), so no perms is sent.
        params = super(Flickr, self).get_authorize_params(redirect_uri, scopes)
        if any(scopes):
            params['perms'] = scopes[0]
        return params

    def get_user_id(self, key):
        # flickr.people.getLimits echoes the caller's own NSID, which
        # serves as the stable user identifier.
        url = u'/services/rest/?method=flickr.people.getLimits'
        url += u'&format=json&nojsoncallback=1'
        r = self.api(key, self.api_domain, url)
        return r.json[u'person'][u'nsid']
| Rewrite Flickr to use the new scope selection system | Rewrite Flickr to use the new scope selection system
| Python | bsd-3-clause | foauth/foauth.org,foauth/foauth.org,foauth/oauth-proxy,foauth/foauth.org | import foauth.providers
class Flickr(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.flickr.com/'
docs_url = 'http://www.flickr.com/services/api/'
category = 'Pictures'
# URLs to interact with the API
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
access_token_url = 'http://www.flickr.com/services/oauth/access_token'
api_domain = 'api.flickr.com'
available_permissions = [
# (None, 'access only your public photos'),
# ('read', 'access your public and private photos'),
# ('write', 'upload, edit and replace your photos'),
('delete', 'upload, edit, replace and delete your photos'),
]
https = False
def get_authorize_params(self, redirect_uri):
params = super(Flickr, self).get_authorize_params(redirect_uri)
params['perms'] = self.available_permissions[0][0]
return params
def get_user_id(self, key):
url = u'/services/rest/?method=flickr.people.getLimits'
url += u'&format=json&nojsoncallback=1'
r = self.api(key, self.api_domain, url)
return r.json[u'person'][u'nsid']
Rewrite Flickr to use the new scope selection system | import foauth.providers
class Flickr(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.flickr.com/'
docs_url = 'http://www.flickr.com/services/api/'
category = 'Pictures'
# URLs to interact with the API
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
access_token_url = 'http://www.flickr.com/services/oauth/access_token'
api_domain = 'api.flickr.com'
available_permissions = [
(None, 'access only your public photos'),
('read', 'access your public and private photos'),
('write', 'upload, edit and replace your photos'),
('delete', 'upload, edit, replace and delete your photos'),
]
permissions_widget = 'radio'
https = False
def get_authorize_params(self, redirect_uri, scopes):
params = super(Flickr, self).get_authorize_params(redirect_uri, scopes)
if any(scopes):
params['perms'] = scopes[0]
return params
def get_user_id(self, key):
url = u'/services/rest/?method=flickr.people.getLimits'
url += u'&format=json&nojsoncallback=1'
r = self.api(key, self.api_domain, url)
return r.json[u'person'][u'nsid']
| <commit_before>import foauth.providers
class Flickr(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.flickr.com/'
docs_url = 'http://www.flickr.com/services/api/'
category = 'Pictures'
# URLs to interact with the API
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
access_token_url = 'http://www.flickr.com/services/oauth/access_token'
api_domain = 'api.flickr.com'
available_permissions = [
# (None, 'access only your public photos'),
# ('read', 'access your public and private photos'),
# ('write', 'upload, edit and replace your photos'),
('delete', 'upload, edit, replace and delete your photos'),
]
https = False
def get_authorize_params(self, redirect_uri):
params = super(Flickr, self).get_authorize_params(redirect_uri)
params['perms'] = self.available_permissions[0][0]
return params
def get_user_id(self, key):
url = u'/services/rest/?method=flickr.people.getLimits'
url += u'&format=json&nojsoncallback=1'
r = self.api(key, self.api_domain, url)
return r.json[u'person'][u'nsid']
<commit_msg>Rewrite Flickr to use the new scope selection system<commit_after> | import foauth.providers
class Flickr(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.flickr.com/'
docs_url = 'http://www.flickr.com/services/api/'
category = 'Pictures'
# URLs to interact with the API
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
access_token_url = 'http://www.flickr.com/services/oauth/access_token'
api_domain = 'api.flickr.com'
available_permissions = [
(None, 'access only your public photos'),
('read', 'access your public and private photos'),
('write', 'upload, edit and replace your photos'),
('delete', 'upload, edit, replace and delete your photos'),
]
permissions_widget = 'radio'
https = False
def get_authorize_params(self, redirect_uri, scopes):
params = super(Flickr, self).get_authorize_params(redirect_uri, scopes)
if any(scopes):
params['perms'] = scopes[0]
return params
def get_user_id(self, key):
url = u'/services/rest/?method=flickr.people.getLimits'
url += u'&format=json&nojsoncallback=1'
r = self.api(key, self.api_domain, url)
return r.json[u'person'][u'nsid']
| import foauth.providers
class Flickr(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.flickr.com/'
docs_url = 'http://www.flickr.com/services/api/'
category = 'Pictures'
# URLs to interact with the API
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
access_token_url = 'http://www.flickr.com/services/oauth/access_token'
api_domain = 'api.flickr.com'
available_permissions = [
# (None, 'access only your public photos'),
# ('read', 'access your public and private photos'),
# ('write', 'upload, edit and replace your photos'),
('delete', 'upload, edit, replace and delete your photos'),
]
https = False
def get_authorize_params(self, redirect_uri):
params = super(Flickr, self).get_authorize_params(redirect_uri)
params['perms'] = self.available_permissions[0][0]
return params
def get_user_id(self, key):
url = u'/services/rest/?method=flickr.people.getLimits'
url += u'&format=json&nojsoncallback=1'
r = self.api(key, self.api_domain, url)
return r.json[u'person'][u'nsid']
Rewrite Flickr to use the new scope selection systemimport foauth.providers
class Flickr(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.flickr.com/'
docs_url = 'http://www.flickr.com/services/api/'
category = 'Pictures'
# URLs to interact with the API
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
access_token_url = 'http://www.flickr.com/services/oauth/access_token'
api_domain = 'api.flickr.com'
available_permissions = [
(None, 'access only your public photos'),
('read', 'access your public and private photos'),
('write', 'upload, edit and replace your photos'),
('delete', 'upload, edit, replace and delete your photos'),
]
permissions_widget = 'radio'
https = False
def get_authorize_params(self, redirect_uri, scopes):
params = super(Flickr, self).get_authorize_params(redirect_uri, scopes)
if any(scopes):
params['perms'] = scopes[0]
return params
def get_user_id(self, key):
url = u'/services/rest/?method=flickr.people.getLimits'
url += u'&format=json&nojsoncallback=1'
r = self.api(key, self.api_domain, url)
return r.json[u'person'][u'nsid']
| <commit_before>import foauth.providers
class Flickr(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.flickr.com/'
docs_url = 'http://www.flickr.com/services/api/'
category = 'Pictures'
# URLs to interact with the API
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
access_token_url = 'http://www.flickr.com/services/oauth/access_token'
api_domain = 'api.flickr.com'
available_permissions = [
# (None, 'access only your public photos'),
# ('read', 'access your public and private photos'),
# ('write', 'upload, edit and replace your photos'),
('delete', 'upload, edit, replace and delete your photos'),
]
https = False
def get_authorize_params(self, redirect_uri):
params = super(Flickr, self).get_authorize_params(redirect_uri)
params['perms'] = self.available_permissions[0][0]
return params
def get_user_id(self, key):
url = u'/services/rest/?method=flickr.people.getLimits'
url += u'&format=json&nojsoncallback=1'
r = self.api(key, self.api_domain, url)
return r.json[u'person'][u'nsid']
<commit_msg>Rewrite Flickr to use the new scope selection system<commit_after>import foauth.providers
class Flickr(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'http://www.flickr.com/'
docs_url = 'http://www.flickr.com/services/api/'
category = 'Pictures'
# URLs to interact with the API
request_token_url = 'http://www.flickr.com/services/oauth/request_token'
authorize_url = 'http://www.flickr.com/services/oauth/authorize'
access_token_url = 'http://www.flickr.com/services/oauth/access_token'
api_domain = 'api.flickr.com'
available_permissions = [
(None, 'access only your public photos'),
('read', 'access your public and private photos'),
('write', 'upload, edit and replace your photos'),
('delete', 'upload, edit, replace and delete your photos'),
]
permissions_widget = 'radio'
https = False
def get_authorize_params(self, redirect_uri, scopes):
params = super(Flickr, self).get_authorize_params(redirect_uri, scopes)
if any(scopes):
params['perms'] = scopes[0]
return params
def get_user_id(self, key):
url = u'/services/rest/?method=flickr.people.getLimits'
url += u'&format=json&nojsoncallback=1'
r = self.api(key, self.api_domain, url)
return r.json[u'person'][u'nsid']
|
f5ddffdf4c0cf85dcade939ff4e3b9b72b968d38 | mfr/image/render.py | mfr/image/render.py | """Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
    """Render an HTML ``<img>`` tag for a file.

    Args:
        fp: File-like object; its ``name`` attribute supplies the source
            when no explicit ``src`` is given.
        src: Optional URL for the image source; defaults to ``fp.name``.
        alt: Optional alternate text for the image.

    Returns:
        The rendered ``<img>`` tag as a string.
    """
    # Default src to the filename
    if src is None:
        src = fp.name
    # Use src verbatim — callers decide whether the path is absolute, so no
    # leading slash is prepended here.
    # NOTE(review): src/alt are interpolated without HTML escaping; confirm
    # callers never pass untrusted text.
    return '<img src="{src}" alt="{alt}" />'.format(src=src, alt=alt)
| """Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
    """Build an HTML ``<img>`` tag string.

    Args:
        fp: File-like object whose ``name`` supplies the default source.
        src: Optional explicit image URL; falls back to ``fp.name``.
        alt: Alternate text for the tag.

    Returns:
        The rendered ``<img>`` tag.
    """
    # Fall back to the file's name when no explicit source was supplied.
    source = fp.name if src is None else src
    return '<img src="{src}" alt="{alt}" />'.format(src=source, alt=alt)
| Remove implicit addition of leading slash | Remove implicit addition of leading slash
| Python | apache-2.0 | icereval/modular-file-renderer,AddisonSchiller/modular-file-renderer,mfraezz/modular-file-renderer,icereval/modular-file-renderer,chrisseto/modular-file-renderer,chrisseto/modular-file-renderer,Johnetordoff/modular-file-renderer,rdhyee/modular-file-renderer,rdhyee/modular-file-renderer,haoyuchen1992/modular-file-renderer,CenterForOpenScience/modular-file-renderer,Johnetordoff/modular-file-renderer,mfraezz/modular-file-renderer,haoyuchen1992/modular-file-renderer,mfraezz/modular-file-renderer,icereval/modular-file-renderer,rdhyee/modular-file-renderer,CenterForOpenScience/modular-file-renderer,erinspace/modular-file-renderer,TomBaxter/modular-file-renderer,CenterForOpenScience/modular-file-renderer,TomBaxter/modular-file-renderer,felliott/modular-file-renderer,AddisonSchiller/modular-file-renderer,Johnetordoff/modular-file-renderer,TomBaxter/modular-file-renderer,erinspace/modular-file-renderer,AddisonSchiller/modular-file-renderer,rdhyee/modular-file-renderer,chrisseto/modular-file-renderer,haoyuchen1992/modular-file-renderer,AddisonSchiller/modular-file-renderer,felliott/modular-file-renderer,erinspace/modular-file-renderer,mfraezz/modular-file-renderer,haoyuchen1992/modular-file-renderer,Johnetordoff/modular-file-renderer,felliott/modular-file-renderer,TomBaxter/modular-file-renderer,felliott/modular-file-renderer,CenterForOpenScience/modular-file-renderer | """Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
"""A simple image tag renderer.
:param str:
"""
# Default src to the filename
if src is None:
src = fp.name
return '<img src="/{src}" alt="{alt}" />'.format(src=src, alt=alt)
Remove implicit addition of leading slash | """Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
"""A simple image tag renderer.
:param str:
"""
# Default src to the filename
if src is None:
src = fp.name
return '<img src="{src}" alt="{alt}" />'.format(src=src, alt=alt)
| <commit_before>"""Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
"""A simple image tag renderer.
:param str:
"""
# Default src to the filename
if src is None:
src = fp.name
return '<img src="/{src}" alt="{alt}" />'.format(src=src, alt=alt)
<commit_msg>Remove implicit addition of leading slash<commit_after> | """Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
"""A simple image tag renderer.
:param str:
"""
# Default src to the filename
if src is None:
src = fp.name
return '<img src="{src}" alt="{alt}" />'.format(src=src, alt=alt)
| """Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
"""A simple image tag renderer.
:param str:
"""
# Default src to the filename
if src is None:
src = fp.name
return '<img src="/{src}" alt="{alt}" />'.format(src=src, alt=alt)
Remove implicit addition of leading slash"""Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
"""A simple image tag renderer.
:param str:
"""
# Default src to the filename
if src is None:
src = fp.name
return '<img src="{src}" alt="{alt}" />'.format(src=src, alt=alt)
| <commit_before>"""Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
"""A simple image tag renderer.
:param str:
"""
# Default src to the filename
if src is None:
src = fp.name
return '<img src="/{src}" alt="{alt}" />'.format(src=src, alt=alt)
<commit_msg>Remove implicit addition of leading slash<commit_after>"""Image renderer module."""
def render_img_tag(fp, src=None, alt=''):
"""A simple image tag renderer.
:param str:
"""
# Default src to the filename
if src is None:
src = fp.name
return '<img src="{src}" alt="{alt}" />'.format(src=src, alt=alt)
|
8f4e61667a7bee4ec847fd8921dccd291d59388c | scripts/slave/chromium/test_webkitpy_wrapper.py | scripts/slave/chromium/test_webkitpy_wrapper.py | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
    """Run WebKit's test-webkitpy inside the Chromium build directory.

    Returns:
        The exit status of the test-webkitpy invocation.
    """
    option_parser = optparse.OptionParser()
    option_parser.add_option('--build-dir', help='ignored')
    # Note that --target isn't needed for --lint-test-files, but the
    # RunPythonCommandInBuildDir() will get upset if we don't say something.
    option_parser.add_option('', '--target', default='release',
        help='DumpRenderTree build configuration (Release or Debug)')
    options, _ = option_parser.parse_args()
    # GetBuildOutputDirectory() returns the directory itself, not a tuple,
    # so assign it directly rather than unpacking it.
    options.build_dir = build_directory.GetBuildOutputDirectory()
    build_dir = os.path.abspath(options.build_dir)
    webkit_tests_dir = chromium_utils.FindUpward(build_dir,
                                                 'third_party', 'WebKit',
                                                 'Tools', 'Scripts')
    command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
    return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
                                                  command)


if '__main__' == __name__:
    sys.exit(main())
| #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
    """Invoke WebKit's test-webkitpy from the Chromium build directory.

    Returns:
        The exit status of the test-webkitpy invocation.
    """
    parser = optparse.OptionParser()
    parser.add_option('--build-dir', help='ignored')
    # Note that --target isn't needed for --lint-test-files, but the
    # RunPythonCommandInBuildDir() will get upset if we don't say something.
    parser.add_option('', '--target', default='release',
                      help='DumpRenderTree build configuration (Release or Debug)')
    opts, _ = parser.parse_args()
    opts.build_dir = build_directory.GetBuildOutputDirectory()
    abs_build_dir = os.path.abspath(opts.build_dir)
    scripts_dir = chromium_utils.FindUpward(abs_build_dir,
                                            'third_party', 'WebKit',
                                            'Tools', 'Scripts')
    test_command = [os.path.join(scripts_dir, 'test-webkitpy')]
    return slave_utils.RunPythonCommandInBuildDir(abs_build_dir, opts.target,
                                                  test_command)


if '__main__' == __name__:
    sys.exit(main())
| Revert 232677 "Revert 232670 "Fix script after r232641"" | Revert 232677 "Revert 232670 "Fix script after r232641""
False alarm, tests were failing due to PDT switch.
> Revert 232670 "Fix script after r232641"
>
> Needs to be out to speculatively revert r232641.
>
> > Fix script after r232641
> >
> > BUG=314253
> > TBR=pfeldman@chromium.org
> >
> > Review URL: https://codereview.chromium.org/49753004
>
> TBR=thakis@chromium.org
>
> Review URL: https://codereview.chromium.org/57293002
TBR=pfeldman@chromium.org
Review URL: https://codereview.chromium.org/57323002
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@232682 0039d316-1c4b-4281-b951-d872f2087c98
| Python | bsd-3-clause | eunchong/build,eunchong/build,eunchong/build,eunchong/build | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--build-dir', help='ignored')
# Note that --target isn't needed for --lint-test-files, but the
# RunPythonCommandInBuildDir() will get upset if we don't say something.
option_parser.add_option('', '--target', default='release',
help='DumpRenderTree build configuration (Release or Debug)')
options, _ = option_parser.parse_args()
options.build_dir, _ = build_directory.GetBuildOutputDirectory()
build_dir = os.path.abspath(options.build_dir)
webkit_tests_dir = chromium_utils.FindUpward(build_dir,
'third_party', 'WebKit',
'Tools', 'Scripts')
command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
command)
if '__main__' == __name__:
sys.exit(main())
Revert 232677 "Revert 232670 "Fix script after r232641""
False alarm, tests we failing due to PDT switch.
> Revert 232670 "Fix script after r232641"
>
> Needs to be out to speculatively revert r232641.
>
> > Fix script after r232641
> >
> > BUG=314253
> > TBR=pfeldman@chromium.org
> >
> > Review URL: https://codereview.chromium.org/49753004
>
> TBR=thakis@chromium.org
>
> Review URL: https://codereview.chromium.org/57293002
TBR=pfeldman@chromium.org
Review URL: https://codereview.chromium.org/57323002
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@232682 0039d316-1c4b-4281-b951-d872f2087c98 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--build-dir', help='ignored')
# Note that --target isn't needed for --lint-test-files, but the
# RunPythonCommandInBuildDir() will get upset if we don't say something.
option_parser.add_option('', '--target', default='release',
help='DumpRenderTree build configuration (Release or Debug)')
options, _ = option_parser.parse_args()
options.build_dir = build_directory.GetBuildOutputDirectory()
build_dir = os.path.abspath(options.build_dir)
webkit_tests_dir = chromium_utils.FindUpward(build_dir,
'third_party', 'WebKit',
'Tools', 'Scripts')
command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
command)
if '__main__' == __name__:
sys.exit(main())
| <commit_before>#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--build-dir', help='ignored')
# Note that --target isn't needed for --lint-test-files, but the
# RunPythonCommandInBuildDir() will get upset if we don't say something.
option_parser.add_option('', '--target', default='release',
help='DumpRenderTree build configuration (Release or Debug)')
options, _ = option_parser.parse_args()
options.build_dir, _ = build_directory.GetBuildOutputDirectory()
build_dir = os.path.abspath(options.build_dir)
webkit_tests_dir = chromium_utils.FindUpward(build_dir,
'third_party', 'WebKit',
'Tools', 'Scripts')
command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
command)
if '__main__' == __name__:
sys.exit(main())
<commit_msg>Revert 232677 "Revert 232670 "Fix script after r232641""
False alarm, tests we failing due to PDT switch.
> Revert 232670 "Fix script after r232641"
>
> Needs to be out to speculatively revert r232641.
>
> > Fix script after r232641
> >
> > BUG=314253
> > TBR=pfeldman@chromium.org
> >
> > Review URL: https://codereview.chromium.org/49753004
>
> TBR=thakis@chromium.org
>
> Review URL: https://codereview.chromium.org/57293002
TBR=pfeldman@chromium.org
Review URL: https://codereview.chromium.org/57323002
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@232682 0039d316-1c4b-4281-b951-d872f2087c98<commit_after> | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--build-dir', help='ignored')
# Note that --target isn't needed for --lint-test-files, but the
# RunPythonCommandInBuildDir() will get upset if we don't say something.
option_parser.add_option('', '--target', default='release',
help='DumpRenderTree build configuration (Release or Debug)')
options, _ = option_parser.parse_args()
options.build_dir = build_directory.GetBuildOutputDirectory()
build_dir = os.path.abspath(options.build_dir)
webkit_tests_dir = chromium_utils.FindUpward(build_dir,
'third_party', 'WebKit',
'Tools', 'Scripts')
command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
command)
if '__main__' == __name__:
sys.exit(main())
| #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--build-dir', help='ignored')
# Note that --target isn't needed for --lint-test-files, but the
# RunPythonCommandInBuildDir() will get upset if we don't say something.
option_parser.add_option('', '--target', default='release',
help='DumpRenderTree build configuration (Release or Debug)')
options, _ = option_parser.parse_args()
options.build_dir, _ = build_directory.GetBuildOutputDirectory()
build_dir = os.path.abspath(options.build_dir)
webkit_tests_dir = chromium_utils.FindUpward(build_dir,
'third_party', 'WebKit',
'Tools', 'Scripts')
command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
command)
if '__main__' == __name__:
sys.exit(main())
Revert 232677 "Revert 232670 "Fix script after r232641""
False alarm, tests we failing due to PDT switch.
> Revert 232670 "Fix script after r232641"
>
> Needs to be out to speculatively revert r232641.
>
> > Fix script after r232641
> >
> > BUG=314253
> > TBR=pfeldman@chromium.org
> >
> > Review URL: https://codereview.chromium.org/49753004
>
> TBR=thakis@chromium.org
>
> Review URL: https://codereview.chromium.org/57293002
TBR=pfeldman@chromium.org
Review URL: https://codereview.chromium.org/57323002
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@232682 0039d316-1c4b-4281-b951-d872f2087c98#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--build-dir', help='ignored')
# Note that --target isn't needed for --lint-test-files, but the
# RunPythonCommandInBuildDir() will get upset if we don't say something.
option_parser.add_option('', '--target', default='release',
help='DumpRenderTree build configuration (Release or Debug)')
options, _ = option_parser.parse_args()
options.build_dir = build_directory.GetBuildOutputDirectory()
build_dir = os.path.abspath(options.build_dir)
webkit_tests_dir = chromium_utils.FindUpward(build_dir,
'third_party', 'WebKit',
'Tools', 'Scripts')
command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
command)
if '__main__' == __name__:
sys.exit(main())
| <commit_before>#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--build-dir', help='ignored')
# Note that --target isn't needed for --lint-test-files, but the
# RunPythonCommandInBuildDir() will get upset if we don't say something.
option_parser.add_option('', '--target', default='release',
help='DumpRenderTree build configuration (Release or Debug)')
options, _ = option_parser.parse_args()
options.build_dir, _ = build_directory.GetBuildOutputDirectory()
build_dir = os.path.abspath(options.build_dir)
webkit_tests_dir = chromium_utils.FindUpward(build_dir,
'third_party', 'WebKit',
'Tools', 'Scripts')
command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
command)
if '__main__' == __name__:
sys.exit(main())
<commit_msg>Revert 232677 "Revert 232670 "Fix script after r232641""
False alarm, tests we failing due to PDT switch.
> Revert 232670 "Fix script after r232641"
>
> Needs to be out to speculatively revert r232641.
>
> > Fix script after r232641
> >
> > BUG=314253
> > TBR=pfeldman@chromium.org
> >
> > Review URL: https://codereview.chromium.org/49753004
>
> TBR=thakis@chromium.org
>
> Review URL: https://codereview.chromium.org/57293002
TBR=pfeldman@chromium.org
Review URL: https://codereview.chromium.org/57323002
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@232682 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper script that invokes test-webkitpy."""
import optparse
import os
import sys
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--build-dir', help='ignored')
# Note that --target isn't needed for --lint-test-files, but the
# RunPythonCommandInBuildDir() will get upset if we don't say something.
option_parser.add_option('', '--target', default='release',
help='DumpRenderTree build configuration (Release or Debug)')
options, _ = option_parser.parse_args()
options.build_dir = build_directory.GetBuildOutputDirectory()
build_dir = os.path.abspath(options.build_dir)
webkit_tests_dir = chromium_utils.FindUpward(build_dir,
'third_party', 'WebKit',
'Tools', 'Scripts')
command = [os.path.join(webkit_tests_dir, 'test-webkitpy')]
return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
command)
if '__main__' == __name__:
sys.exit(main())
|
0ef7061bb0661efd101efddc98d55cf819eba219 | scmtiles/test/unit/util/__init__.py | scmtiles/test/unit/util/__init__.py | """Tests for the `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """Tests for `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| Improve the util module unit test docstring. | Improve the util module unit test docstring.
| Python | apache-2.0 | aopp-pred/scmtiles | """Tests for the `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Improve the util module unit test docstring. | """Tests for `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| <commit_before>"""Tests for the `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<commit_msg>Improve the util module unit test docstring.<commit_after> | """Tests for `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| """Tests for the `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Improve the util module unit test docstring."""Tests for `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| <commit_before>"""Tests for the `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<commit_msg>Improve the util module unit test docstring.<commit_after>"""Tests for `scmtiles.util`."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
68480f191167573e32853427c37049fc6f7d1279 | nsfw_dl/__init__.py | nsfw_dl/__init__.py | """
Read the license at:
https://github.com/IzunaDevs/nsfw_dl/blob/master/LICENSE
"""
from .dl import NSFWDL # noqa
__title__ = 'nsfw_dl'
__author__ = 'IzunaDevs'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 IzunaDevs'
__version__ = '0.2.0'
__build__ = 0x000100
| Revert "Make it find this" | Revert "Make it find this"
This reverts commit f4d8589a9c424d32ae403eddd7d97366c27cb8fd.
| Python | mit | IzunaDevs/nsfw_dl |
Revert "Make it find this"
This reverts commit f4d8589a9c424d32ae403eddd7d97366c27cb8fd. | """
Read the license at:
https://github.com/IzunaDevs/nsfw_dl/blob/master/LICENSE
"""
from .dl import NSFWDL # noqa
__title__ = 'nsfw_dl'
__author__ = 'IzunaDevs'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 IzunaDevs'
__version__ = '0.2.0'
__build__ = 0x000100
| <commit_before>
<commit_msg>Revert "Make it find this"
This reverts commit f4d8589a9c424d32ae403eddd7d97366c27cb8fd.<commit_after> | """
Read the license at:
https://github.com/IzunaDevs/nsfw_dl/blob/master/LICENSE
"""
from .dl import NSFWDL # noqa
__title__ = 'nsfw_dl'
__author__ = 'IzunaDevs'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 IzunaDevs'
__version__ = '0.2.0'
__build__ = 0x000100
|
Revert "Make it find this"
This reverts commit f4d8589a9c424d32ae403eddd7d97366c27cb8fd."""
Read the license at:
https://github.com/IzunaDevs/nsfw_dl/blob/master/LICENSE
"""
from .dl import NSFWDL # noqa
__title__ = 'nsfw_dl'
__author__ = 'IzunaDevs'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 IzunaDevs'
__version__ = '0.2.0'
__build__ = 0x000100
| <commit_before>
<commit_msg>Revert "Make it find this"
This reverts commit f4d8589a9c424d32ae403eddd7d97366c27cb8fd.<commit_after>"""
Read the license at:
https://github.com/IzunaDevs/nsfw_dl/blob/master/LICENSE
"""
from .dl import NSFWDL # noqa
__title__ = 'nsfw_dl'
__author__ = 'IzunaDevs'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 IzunaDevs'
__version__ = '0.2.0'
__build__ = 0x000100
| |
452b67fa4fe5d9f34a98971e377bbaa1b978907b | superblock.py | superblock.py | #!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def block_printer(filename, offset, block_count):
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 8):
part1 = f.read(4)
part2 = f.read(4)
print '{0:2}: {1} {2} {3}'.format(j+1, hexlify(part1), hexlify(part2), ''.join(map(nonprintable_replace, part1 + part2)))
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print 'Printing superblock (bytes 1024-1535) of file %s.\n' % filename
print ''.center(5) + 'HEX'.center(18) + 'ASCII'.center(8)
block_printer(filename, 2, 1)
| #!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
def block_printer(filename, offset, block_count):
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 16):
word = f.read(4), f.read(4), f.read(4), f.read(4)
hex_string = ' '.join(map(hexlify, word))
ascii_string = ''.join(map(nonprintable_replace, ''.join(word)))
print '{0:2}: {1} {2}'.format(j + 1, hex_string, ascii_string)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print '\nPrinting superblock (bytes 1024-1535) of file %s.\n' % filename
print ' ' * 5 + 'HEX'.center(35) + ' ' + 'ASCII'.center(16)
block_printer(filename, 2, 1)
| Print 16 bit per line | Print 16 bit per line
| Python | mit | dbrgn/superblock | #!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def block_printer(filename, offset, block_count):
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 8):
part1 = f.read(4)
part2 = f.read(4)
print '{0:2}: {1} {2} {3}'.format(j+1, hexlify(part1), hexlify(part2), ''.join(map(nonprintable_replace, part1 + part2)))
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print 'Printing superblock (bytes 1024-1535) of file %s.\n' % filename
print ''.center(5) + 'HEX'.center(18) + 'ASCII'.center(8)
block_printer(filename, 2, 1)
Print 16 bit per line | #!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
def block_printer(filename, offset, block_count):
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 16):
word = f.read(4), f.read(4), f.read(4), f.read(4)
hex_string = ' '.join(map(hexlify, word))
ascii_string = ''.join(map(nonprintable_replace, ''.join(word)))
print '{0:2}: {1} {2}'.format(j + 1, hex_string, ascii_string)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print '\nPrinting superblock (bytes 1024-1535) of file %s.\n' % filename
print ' ' * 5 + 'HEX'.center(35) + ' ' + 'ASCII'.center(16)
block_printer(filename, 2, 1)
| <commit_before>#!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def block_printer(filename, offset, block_count):
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 8):
part1 = f.read(4)
part2 = f.read(4)
print '{0:2}: {1} {2} {3}'.format(j+1, hexlify(part1), hexlify(part2), ''.join(map(nonprintable_replace, part1 + part2)))
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print 'Printing superblock (bytes 1024-1535) of file %s.\n' % filename
print ''.center(5) + 'HEX'.center(18) + 'ASCII'.center(8)
block_printer(filename, 2, 1)
<commit_msg>Print 16 bit per line<commit_after> | #!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
def block_printer(filename, offset, block_count):
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 16):
word = f.read(4), f.read(4), f.read(4), f.read(4)
hex_string = ' '.join(map(hexlify, word))
ascii_string = ''.join(map(nonprintable_replace, ''.join(word)))
print '{0:2}: {1} {2}'.format(j + 1, hex_string, ascii_string)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print '\nPrinting superblock (bytes 1024-1535) of file %s.\n' % filename
print ' ' * 5 + 'HEX'.center(35) + ' ' + 'ASCII'.center(16)
block_printer(filename, 2, 1)
| #!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def block_printer(filename, offset, block_count):
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 8):
part1 = f.read(4)
part2 = f.read(4)
print '{0:2}: {1} {2} {3}'.format(j+1, hexlify(part1), hexlify(part2), ''.join(map(nonprintable_replace, part1 + part2)))
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print 'Printing superblock (bytes 1024-1535) of file %s.\n' % filename
print ''.center(5) + 'HEX'.center(18) + 'ASCII'.center(8)
block_printer(filename, 2, 1)
Print 16 bit per line#!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
def block_printer(filename, offset, block_count):
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 16):
word = f.read(4), f.read(4), f.read(4), f.read(4)
hex_string = ' '.join(map(hexlify, word))
ascii_string = ''.join(map(nonprintable_replace, ''.join(word)))
print '{0:2}: {1} {2}'.format(j + 1, hex_string, ascii_string)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print '\nPrinting superblock (bytes 1024-1535) of file %s.\n' % filename
print ' ' * 5 + 'HEX'.center(35) + ' ' + 'ASCII'.center(16)
block_printer(filename, 2, 1)
| <commit_before>#!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def block_printer(filename, offset, block_count):
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 8):
part1 = f.read(4)
part2 = f.read(4)
print '{0:2}: {1} {2} {3}'.format(j+1, hexlify(part1), hexlify(part2), ''.join(map(nonprintable_replace, part1 + part2)))
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print 'Printing superblock (bytes 1024-1535) of file %s.\n' % filename
print ''.center(5) + 'HEX'.center(18) + 'ASCII'.center(8)
block_printer(filename, 2, 1)
<commit_msg>Print 16 bit per line<commit_after>#!/usr/bin/env python2
"""
Analyze superblock in ext2 filesystem.
Usage:
superblock.py <filename>
"""
import sys
import string
from binascii import hexlify
BLOCKSIZE = 512
def nonprintable_replace(char):
if char not in string.printable:
return '.'
if char in '\n\r\t\x0b\x0c':
return '.'
return char
def block_printer(filename, offset, block_count):
with open(filename, 'rb') as f:
f.seek(offset * BLOCKSIZE)
# Loop over blocks
for i in xrange(block_count):
# Loop over bytes
for j in xrange(BLOCKSIZE / 16):
word = f.read(4), f.read(4), f.read(4), f.read(4)
hex_string = ' '.join(map(hexlify, word))
ascii_string = ''.join(map(nonprintable_replace, ''.join(word)))
print '{0:2}: {1} {2}'.format(j + 1, hex_string, ascii_string)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: superblock.py <filename>'
sys.exit(1)
filename = sys.argv[1]
print '\nPrinting superblock (bytes 1024-1535) of file %s.\n' % filename
print ' ' * 5 + 'HEX'.center(35) + ' ' + 'ASCII'.center(16)
block_printer(filename, 2, 1)
|
3744a620bddde501c0b2634b7cd54a755433c17a | djangopeoplenet/manage.py | djangopeoplenet/manage.py | #!/usr/bin/env python
import sys
paths = (
'/home/simon/sites/djangopeople.net',
'/home/simon/sites/djangopeople.net/djangopeoplenet',
'/home/simon/sites/djangopeople.net/djangopeoplenet/djangopeople/lib',
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| #!/usr/bin/env python
import sys, os
root = os.path.dirname(__file__)
paths = (
os.path.join(root),
os.path.join(root, "djangopeople", "lib"),
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| Make the lib imports work on other computers than Simon's | Make the lib imports work on other computers than Simon's | Python | mit | brutasse/djangopeople,django/djangopeople,brutasse/djangopeople,polinom/djangopeople,brutasse/djangopeople,polinom/djangopeople,polinom/djangopeople,django/djangopeople,brutasse/djangopeople,polinom/djangopeople,django/djangopeople | #!/usr/bin/env python
import sys
paths = (
'/home/simon/sites/djangopeople.net',
'/home/simon/sites/djangopeople.net/djangopeoplenet',
'/home/simon/sites/djangopeople.net/djangopeoplenet/djangopeople/lib',
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
Make the lib imports work on other computers than Simon's | #!/usr/bin/env python
import sys, os
root = os.path.dirname(__file__)
paths = (
os.path.join(root),
os.path.join(root, "djangopeople", "lib"),
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| <commit_before>#!/usr/bin/env python
import sys
paths = (
'/home/simon/sites/djangopeople.net',
'/home/simon/sites/djangopeople.net/djangopeoplenet',
'/home/simon/sites/djangopeople.net/djangopeoplenet/djangopeople/lib',
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
<commit_msg>Make the lib imports work on other computers than Simon's<commit_after> | #!/usr/bin/env python
import sys, os
root = os.path.dirname(__file__)
paths = (
os.path.join(root),
os.path.join(root, "djangopeople", "lib"),
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| #!/usr/bin/env python
import sys
paths = (
'/home/simon/sites/djangopeople.net',
'/home/simon/sites/djangopeople.net/djangopeoplenet',
'/home/simon/sites/djangopeople.net/djangopeoplenet/djangopeople/lib',
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
Make the lib imports work on other computers than Simon's#!/usr/bin/env python
import sys, os
root = os.path.dirname(__file__)
paths = (
os.path.join(root),
os.path.join(root, "djangopeople", "lib"),
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| <commit_before>#!/usr/bin/env python
import sys
paths = (
'/home/simon/sites/djangopeople.net',
'/home/simon/sites/djangopeople.net/djangopeoplenet',
'/home/simon/sites/djangopeople.net/djangopeoplenet/djangopeople/lib',
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
<commit_msg>Make the lib imports work on other computers than Simon's<commit_after>#!/usr/bin/env python
import sys, os
root = os.path.dirname(__file__)
paths = (
os.path.join(root),
os.path.join(root, "djangopeople", "lib"),
)
for path in paths:
if not path in sys.path:
sys.path.insert(0, path)
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
|
5ead9e24ec73ee66886858bf70f357ae170bdf3b | spillway/mixins.py | spillway/mixins.py | class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def get_query_form(self):
"""Returns a bound form instance."""
return self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
def clean_params(self):
"""Returns a validated form dict or an empty dict."""
form = self.get_query_form()
return form.cleaned_data if form.is_valid() else {}
| from rest_framework.exceptions import ValidationError
class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def clean_params(self):
"""Returns a validated form dict from Request parameters."""
form = self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
if form.is_valid():
return form.cleaned_data
raise ValidationError(form.errors)
| Throw ValidationError for invalid form, drop get_query_form() | Throw ValidationError for invalid form, drop get_query_form()
| Python | bsd-3-clause | kuzmich/django-spillway,barseghyanartur/django-spillway,bkg/django-spillway | class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def get_query_form(self):
"""Returns a bound form instance."""
return self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
def clean_params(self):
"""Returns a validated form dict or an empty dict."""
form = self.get_query_form()
return form.cleaned_data if form.is_valid() else {}
Throw ValidationError for invalid form, drop get_query_form() | from rest_framework.exceptions import ValidationError
class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def clean_params(self):
"""Returns a validated form dict from Request parameters."""
form = self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
if form.is_valid():
return form.cleaned_data
raise ValidationError(form.errors)
| <commit_before>class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def get_query_form(self):
"""Returns a bound form instance."""
return self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
def clean_params(self):
"""Returns a validated form dict or an empty dict."""
form = self.get_query_form()
return form.cleaned_data if form.is_valid() else {}
<commit_msg>Throw ValidationError for invalid form, drop get_query_form()<commit_after> | from rest_framework.exceptions import ValidationError
class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def clean_params(self):
"""Returns a validated form dict from Request parameters."""
form = self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
if form.is_valid():
return form.cleaned_data
raise ValidationError(form.errors)
| class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def get_query_form(self):
"""Returns a bound form instance."""
return self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
def clean_params(self):
"""Returns a validated form dict or an empty dict."""
form = self.get_query_form()
return form.cleaned_data if form.is_valid() else {}
Throw ValidationError for invalid form, drop get_query_form()from rest_framework.exceptions import ValidationError
class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def clean_params(self):
"""Returns a validated form dict from Request parameters."""
form = self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
if form.is_valid():
return form.cleaned_data
raise ValidationError(form.errors)
| <commit_before>class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def get_query_form(self):
"""Returns a bound form instance."""
return self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
def clean_params(self):
"""Returns a validated form dict or an empty dict."""
form = self.get_query_form()
return form.cleaned_data if form.is_valid() else {}
<commit_msg>Throw ValidationError for invalid form, drop get_query_form()<commit_after>from rest_framework.exceptions import ValidationError
class ModelSerializerMixin(object):
"""Provides generic model serializer classes to views."""
model_serializer_class = None
def get_serializer_class(self):
if self.serializer_class:
return self.serializer_class
class DefaultSerializer(self.model_serializer_class):
class Meta:
model = self.queryset.model
return DefaultSerializer
class QueryFormMixin(object):
"""Provides form based handling of GET or POST requests."""
query_form_class = None
def clean_params(self):
"""Returns a validated form dict from Request parameters."""
form = self.query_form_class(
self.request.query_params or self.request.data,
self.request.FILES or None)
if form.is_valid():
return form.cleaned_data
raise ValidationError(form.errors)
|
6676a47806c3c35d800450ff9480cabdc52928e8 | tedx/views.py | tedx/views.py | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
def list_registration(request):
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
| from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import render
from clubs.models import Team
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
@login_required
def list_registration(request):
tedx_team = Team.objects.get(code_name="tedx_2017_registration")
is_tedx_member = tedx_team.members.filter(pk=request.user.pk).exists() or\
tedx_team.coordinator == request.user
if not request.user.is_superuser and\
not is_tedx_member:
raise PermissionDenied
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
| Add permission check for TEDx | Add permission check for TEDx
| Python | agpl-3.0 | osamak/student-portal,enjaz/enjaz,osamak/student-portal,osamak/student-portal,enjaz/enjaz,enjaz/enjaz,osamak/student-portal,enjaz/enjaz,osamak/student-portal,enjaz/enjaz | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
def list_registration(request):
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
Add permission check for TEDx | from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import render
from clubs.models import Team
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
@login_required
def list_registration(request):
tedx_team = Team.objects.get(code_name="tedx_2017_registration")
is_tedx_member = tedx_team.members.filter(pk=request.user.pk).exists() or\
tedx_team.coordinator == request.user
if not request.user.is_superuser and\
not is_tedx_member:
raise PermissionDenied
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
| <commit_before>from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
def list_registration(request):
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
<commit_msg>Add permission check for TEDx<commit_after> | from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import render
from clubs.models import Team
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
@login_required
def list_registration(request):
tedx_team = Team.objects.get(code_name="tedx_2017_registration")
is_tedx_member = tedx_team.members.filter(pk=request.user.pk).exists() or\
tedx_team.coordinator == request.user
if not request.user.is_superuser and\
not is_tedx_member:
raise PermissionDenied
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
| from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
def list_registration(request):
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
Add permission check for TEDxfrom django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import render
from clubs.models import Team
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
@login_required
def list_registration(request):
tedx_team = Team.objects.get(code_name="tedx_2017_registration")
is_tedx_member = tedx_team.members.filter(pk=request.user.pk).exists() or\
tedx_team.coordinator == request.user
if not request.user.is_superuser and\
not is_tedx_member:
raise PermissionDenied
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
| <commit_before>from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
def list_registration(request):
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
<commit_msg>Add permission check for TEDx<commit_after>from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import render
from clubs.models import Team
from .forms import RegistrationForm
from .models import Registration
import utils
def handle_registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
registration = form.save(commit=False)
# If user is logged-in, let's try sending a tweet!
if request.user.is_authenticated():
registration.user = request.user
utils.create_tweet(request.user)
registration.save()
return HttpResponseRedirect(reverse('tedx:thanks'))
else:
form = RegistrationForm()
context = {'form': form}
return render(request, 'tedx/index.html', context)
@login_required
def list_registration(request):
tedx_team = Team.objects.get(code_name="tedx_2017_registration")
is_tedx_member = tedx_team.members.filter(pk=request.user.pk).exists() or\
tedx_team.coordinator == request.user
if not request.user.is_superuser and\
not is_tedx_member:
raise PermissionDenied
list_registration = Registration.objects.all()
context = {'list_registration' : list_registration}
return render(request, 'tedx/list_registration.html', context)
|
c4bd1d33a69979ba71e4f15145d154b9d986f10c | ci/ci-deploy-data-center.py | ci/ci-deploy-data-center.py | #!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx:
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx is not None:
print("Setting connectivity for NSX offerings")
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
| Fix setting connectivity for NSX offerings | Fix setting connectivity for NSX offerings
| Python | apache-2.0 | MissionCriticalCloud/bubble,schubergphilis/MCT-shared,MissionCriticalCloud/bubble-toolkit,MissionCriticalCloud/bubble,MissionCriticalCloud/bubble-toolkit,schubergphilis/MCT-shared | #!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx:
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
Fix setting connectivity for NSX offerings | #!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx is not None:
print("Setting connectivity for NSX offerings")
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
| <commit_before>#!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx:
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
<commit_msg>Fix setting connectivity for NSX offerings<commit_after> | #!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx is not None:
print("Setting connectivity for NSX offerings")
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx:
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
Fix setting connectivity for NSX offerings#!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx is not None:
print("Setting connectivity for NSX offerings")
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
| <commit_before>#!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx:
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
<commit_msg>Fix setting connectivity for NSX offerings<commit_after>#!/usr/bin/env python
import click
from Cosmic.CI import *
from Cosmic.NSX import *
@click.command()
@click.option('--marvin-config', '-m', help='Marvin config file', required=True)
@click.option('--debug', help="Turn on debugging output", is_flag=True)
def main(**kwargs):
ci = CI(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
nsx = NSX(marvin_config=kwargs.get('marvin_config'), debug=kwargs.get('debug'))
ci.wait_for_port(ci.config['mgtSvr'][0]['mgtSvrIp'])
ci.copy_marvin_config()
ci.deploy_dc()
if nsx is not None:
print("Setting connectivity for NSX offerings")
nsx.add_connectivy_to_offerings()
ci.wait_for_templates()
if __name__ == '__main__':
main()
|
3ad466fc9b1971f3c10123db7b962bc93f79eb78 | sahara_dashboard/enabled/_1810_data_processing_panel_group.py | sahara_dashboard/enabled/_1810_data_processing_panel_group.py | from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
| Add Apache 2.0 license to source file | Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldnt contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: If6676d7bf7409ebec2a035bdae158b644e19a861
| Python | apache-2.0 | openstack/sahara-dashboard,openstack/sahara-dashboard,openstack/sahara-dashboard,openstack/sahara-dashboard | from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldnt contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: If6676d7bf7409ebec2a035bdae158b644e19a861 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
| <commit_before>from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
<commit_msg>Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldnt contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: If6676d7bf7409ebec2a035bdae158b644e19a861<commit_after> | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
| from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldnt contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: If6676d7bf7409ebec2a035bdae158b644e19a861# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
| <commit_before>from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
<commit_msg>Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldnt contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: If6676d7bf7409ebec2a035bdae158b644e19a861<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'data_processing'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('Data Processing')
# The slug of the dashboard the PANEL_GROUP associated with. Required.
PANEL_GROUP_DASHBOARD = 'project'
ADD_INSTALLED_APPS = ['sahara_dashboard']
|
3fc9588a0f689a01ac8da0f43551418d3b3649b5 | extractor_train/assets.py | extractor_train/assets.py | # -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
"libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
"libs/annotator.1.2.9/annotator.min.js",
"libs/rangy/external/log4javascript_stub.js",
"libs/rangy/src/core/core.js",
"libs/rangy/src/core/dom.js",
"libs/rangy/src/core//domrange.js",
"libs/rangy/src/core/wrappedrange.js",
"libs/rangy/src/core/wrappedselection.js",
"libs/rangy/src/modules/rangy-classapplier.js",
"libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
| # -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
# "libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
# "libs/annotator.1.2.9/annotator.min.js",
# "libs/rangy/external/log4javascript_stub.js",
# "libs/rangy/src/core/core.js",
# "libs/rangy/src/core/dom.js",
# "libs/rangy/src/core//domrange.js",
# "libs/rangy/src/core/wrappedrange.js",
# "libs/rangy/src/core/wrappedselection.js",
# "libs/rangy/src/modules/rangy-classapplier.js",
# "libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
| Remove inlcudes for no longer needed libraries. | Remove inlcudes for no longer needed libraries.
| Python | bsd-3-clause | dlarochelle/extractor_train,dlarochelle/extractor_train,dlarochelle/extractor_train | # -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
"libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
"libs/annotator.1.2.9/annotator.min.js",
"libs/rangy/external/log4javascript_stub.js",
"libs/rangy/src/core/core.js",
"libs/rangy/src/core/dom.js",
"libs/rangy/src/core//domrange.js",
"libs/rangy/src/core/wrappedrange.js",
"libs/rangy/src/core/wrappedselection.js",
"libs/rangy/src/modules/rangy-classapplier.js",
"libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
Remove inlcudes for no longer needed libraries. | # -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
# "libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
# "libs/annotator.1.2.9/annotator.min.js",
# "libs/rangy/external/log4javascript_stub.js",
# "libs/rangy/src/core/core.js",
# "libs/rangy/src/core/dom.js",
# "libs/rangy/src/core//domrange.js",
# "libs/rangy/src/core/wrappedrange.js",
# "libs/rangy/src/core/wrappedselection.js",
# "libs/rangy/src/modules/rangy-classapplier.js",
# "libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
| <commit_before># -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
"libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
"libs/annotator.1.2.9/annotator.min.js",
"libs/rangy/external/log4javascript_stub.js",
"libs/rangy/src/core/core.js",
"libs/rangy/src/core/dom.js",
"libs/rangy/src/core//domrange.js",
"libs/rangy/src/core/wrappedrange.js",
"libs/rangy/src/core/wrappedselection.js",
"libs/rangy/src/modules/rangy-classapplier.js",
"libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
<commit_msg>Remove inlcudes for no longer needed libraries.<commit_after> | # -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
# "libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
# "libs/annotator.1.2.9/annotator.min.js",
# "libs/rangy/external/log4javascript_stub.js",
# "libs/rangy/src/core/core.js",
# "libs/rangy/src/core/dom.js",
# "libs/rangy/src/core//domrange.js",
# "libs/rangy/src/core/wrappedrange.js",
# "libs/rangy/src/core/wrappedselection.js",
# "libs/rangy/src/modules/rangy-classapplier.js",
# "libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
| # -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
"libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
"libs/annotator.1.2.9/annotator.min.js",
"libs/rangy/external/log4javascript_stub.js",
"libs/rangy/src/core/core.js",
"libs/rangy/src/core/dom.js",
"libs/rangy/src/core//domrange.js",
"libs/rangy/src/core/wrappedrange.js",
"libs/rangy/src/core/wrappedselection.js",
"libs/rangy/src/modules/rangy-classapplier.js",
"libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
Remove inlcudes for no longer needed libraries.# -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
# "libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
# "libs/annotator.1.2.9/annotator.min.js",
# "libs/rangy/external/log4javascript_stub.js",
# "libs/rangy/src/core/core.js",
# "libs/rangy/src/core/dom.js",
# "libs/rangy/src/core//domrange.js",
# "libs/rangy/src/core/wrappedrange.js",
# "libs/rangy/src/core/wrappedselection.js",
# "libs/rangy/src/modules/rangy-classapplier.js",
# "libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
| <commit_before># -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
"libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
"libs/annotator.1.2.9/annotator.min.js",
"libs/rangy/external/log4javascript_stub.js",
"libs/rangy/src/core/core.js",
"libs/rangy/src/core/dom.js",
"libs/rangy/src/core//domrange.js",
"libs/rangy/src/core/wrappedrange.js",
"libs/rangy/src/core/wrappedselection.js",
"libs/rangy/src/modules/rangy-classapplier.js",
"libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
<commit_msg>Remove inlcudes for no longer needed libraries.<commit_after># -*- coding: utf-8 -*-
from flask.ext.assets import Bundle, Environment
css = Bundle(
"libs/bootstrap/dist/css/bootstrap.css",
"css/style.css",
# "libs/annotator.1.2.9/annotator.min.css",
filters="cssmin",
output="public/css/common.css"
)
js = Bundle(
"libs/jQuery/dist/jquery.js",
"libs/bootstrap/dist/js/bootstrap.js",
"js/plugins.js",
# "libs/annotator.1.2.9/annotator.min.js",
# "libs/rangy/external/log4javascript_stub.js",
# "libs/rangy/src/core/core.js",
# "libs/rangy/src/core/dom.js",
# "libs/rangy/src/core//domrange.js",
# "libs/rangy/src/core/wrappedrange.js",
# "libs/rangy/src/core/wrappedselection.js",
# "libs/rangy/src/modules/rangy-classapplier.js",
# "libs/rangy/src/modules/rangy-highlighter.js",
filters='jsmin',
output="public/js/common.js"
)
assets = Environment()
assets.register("js_all", js)
assets.register("css_all", css)
|
7a88109ec8cb5d44bfac3d6b2216a11eb710f0c3 | dcu/active_memory/__init__.py | dcu/active_memory/__init__.py | from dcu.active_memory.rotate import rotate
from dcu.active_memory.rotate import splitext
from dcu.active_memory.upload import multipart_upload
import os.path
import reimport logging
logger = logging.getLogger(__name__)
def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=None):
'''
Upload file_path to s3 bucket with prefix
Ex. upload('/tmp/file-2015-01-01.tar.bz2', 'backups', 'foo.net/')
would upload file to bucket backups with key=foo.net/file-2015-01-01.tar.bz2
and then rotate all files starting with foo.net/file and with extension .tar.bz2
Timestamps need to be present between the file root and the extension and in the same format as strftime("%Y-%m-%d").
Ex file-2015-12-28.tar.bz2
'''
key = ''.join([s3_key_prefix, os.path.basename(file_path)])
logger.debug("Uploading {0} to {1}".format(file_path, key))
multipart_upload(s3_bucket, aws_key, aws_secret, file_path, key, False, 0, None, 0)
file_root, file_ext = splitext(os.path.basename(file_path))
# strip timestamp from file_base
regex = '(?P<filename>.*)-(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?)'
match = re.match(regex, file_root)
if not match:
raise Exception('File does not contain a timestamp')
key_prefix = ''.join([s3_key_prefix, match.group('filename')])
logger.debug('Rotating files on S3 with key prefix {0} and extension {1}'.format(key_prefix, file_ext))
| Implement function to upload and rotate a file with an existing timestamp | Implement function to upload and rotate a file with an existing timestamp
| Python | mit | dirkcuys/active-memory | Implement function to upload and rotate a file with an existing timestamp | from dcu.active_memory.rotate import rotate
from dcu.active_memory.rotate import splitext
from dcu.active_memory.upload import multipart_upload
import os.path
import reimport logging
logger = logging.getLogger(__name__)
def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=None):
'''
Upload file_path to s3 bucket with prefix
Ex. upload('/tmp/file-2015-01-01.tar.bz2', 'backups', 'foo.net/')
would upload file to bucket backups with key=foo.net/file-2015-01-01.tar.bz2
and then rotate all files starting with foo.net/file and with extension .tar.bz2
Timestamps need to be present between the file root and the extension and in the same format as strftime("%Y-%m-%d").
Ex file-2015-12-28.tar.bz2
'''
key = ''.join([s3_key_prefix, os.path.basename(file_path)])
logger.debug("Uploading {0} to {1}".format(file_path, key))
multipart_upload(s3_bucket, aws_key, aws_secret, file_path, key, False, 0, None, 0)
file_root, file_ext = splitext(os.path.basename(file_path))
# strip timestamp from file_base
regex = '(?P<filename>.*)-(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?)'
match = re.match(regex, file_root)
if not match:
raise Exception('File does not contain a timestamp')
key_prefix = ''.join([s3_key_prefix, match.group('filename')])
logger.debug('Rotating files on S3 with key prefix {0} and extension {1}'.format(key_prefix, file_ext))
| <commit_before><commit_msg>Implement function to upload and rotate a file with an existing timestamp<commit_after> | from dcu.active_memory.rotate import rotate
from dcu.active_memory.rotate import splitext
from dcu.active_memory.upload import multipart_upload
import os.path
import reimport logging
logger = logging.getLogger(__name__)
def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=None):
'''
Upload file_path to s3 bucket with prefix
Ex. upload('/tmp/file-2015-01-01.tar.bz2', 'backups', 'foo.net/')
would upload file to bucket backups with key=foo.net/file-2015-01-01.tar.bz2
and then rotate all files starting with foo.net/file and with extension .tar.bz2
Timestamps need to be present between the file root and the extension and in the same format as strftime("%Y-%m-%d").
Ex file-2015-12-28.tar.bz2
'''
key = ''.join([s3_key_prefix, os.path.basename(file_path)])
logger.debug("Uploading {0} to {1}".format(file_path, key))
multipart_upload(s3_bucket, aws_key, aws_secret, file_path, key, False, 0, None, 0)
file_root, file_ext = splitext(os.path.basename(file_path))
# strip timestamp from file_base
regex = '(?P<filename>.*)-(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?)'
match = re.match(regex, file_root)
if not match:
raise Exception('File does not contain a timestamp')
key_prefix = ''.join([s3_key_prefix, match.group('filename')])
logger.debug('Rotating files on S3 with key prefix {0} and extension {1}'.format(key_prefix, file_ext))
| Implement function to upload and rotate a file with an existing timestampfrom dcu.active_memory.rotate import rotate
from dcu.active_memory.rotate import splitext
from dcu.active_memory.upload import multipart_upload
import os.path
import reimport logging
logger = logging.getLogger(__name__)
def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=None):
'''
Upload file_path to s3 bucket with prefix
Ex. upload('/tmp/file-2015-01-01.tar.bz2', 'backups', 'foo.net/')
would upload file to bucket backups with key=foo.net/file-2015-01-01.tar.bz2
and then rotate all files starting with foo.net/file and with extension .tar.bz2
Timestamps need to be present between the file root and the extension and in the same format as strftime("%Y-%m-%d").
Ex file-2015-12-28.tar.bz2
'''
key = ''.join([s3_key_prefix, os.path.basename(file_path)])
logger.debug("Uploading {0} to {1}".format(file_path, key))
multipart_upload(s3_bucket, aws_key, aws_secret, file_path, key, False, 0, None, 0)
file_root, file_ext = splitext(os.path.basename(file_path))
# strip timestamp from file_base
regex = '(?P<filename>.*)-(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?)'
match = re.match(regex, file_root)
if not match:
raise Exception('File does not contain a timestamp')
key_prefix = ''.join([s3_key_prefix, match.group('filename')])
logger.debug('Rotating files on S3 with key prefix {0} and extension {1}'.format(key_prefix, file_ext))
| <commit_before><commit_msg>Implement function to upload and rotate a file with an existing timestamp<commit_after>from dcu.active_memory.rotate import rotate
from dcu.active_memory.rotate import splitext
from dcu.active_memory.upload import multipart_upload
import os.path
import reimport logging
logger = logging.getLogger(__name__)
def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=None):
'''
Upload file_path to s3 bucket with prefix
Ex. upload('/tmp/file-2015-01-01.tar.bz2', 'backups', 'foo.net/')
would upload file to bucket backups with key=foo.net/file-2015-01-01.tar.bz2
and then rotate all files starting with foo.net/file and with extension .tar.bz2
Timestamps need to be present between the file root and the extension and in the same format as strftime("%Y-%m-%d").
Ex file-2015-12-28.tar.bz2
'''
key = ''.join([s3_key_prefix, os.path.basename(file_path)])
logger.debug("Uploading {0} to {1}".format(file_path, key))
multipart_upload(s3_bucket, aws_key, aws_secret, file_path, key, False, 0, None, 0)
file_root, file_ext = splitext(os.path.basename(file_path))
# strip timestamp from file_base
regex = '(?P<filename>.*)-(?P<year>[\d]+?)-(?P<month>[\d]+?)-(?P<day>[\d]+?)'
match = re.match(regex, file_root)
if not match:
raise Exception('File does not contain a timestamp')
key_prefix = ''.join([s3_key_prefix, match.group('filename')])
logger.debug('Rotating files on S3 with key prefix {0} and extension {1}'.format(key_prefix, file_ext))
| |
dde9b3808b3e85f5513cc3604fd219a90774c047 | bot.py | bot.py |
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth.".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km.".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now...")
|
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth. \n#bot #space #voyager".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km. \n#bot #NEO #asteroids".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now... \n#bot #doomsday #NEO #asteroids")
| Update 0.4.2 - Added hashtags | Update 0.4.2
- Added hashtags
| Python | mit | FXelix/space_facts_bot |
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth.".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km.".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now...")
Update 0.4.2
- Added hashtags |
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth. \n#bot #space #voyager".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km. \n#bot #NEO #asteroids".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now... \n#bot #doomsday #NEO #asteroids")
| <commit_before>
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth.".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km.".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now...")
<commit_msg>Update 0.4.2
- Added hashtags<commit_after> |
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth. \n#bot #space #voyager".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km. \n#bot #NEO #asteroids".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now... \n#bot #doomsday #NEO #asteroids")
|
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth.".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km.".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now...")
Update 0.4.2
- Added hashtags
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth. \n#bot #space #voyager".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km. \n#bot #NEO #asteroids".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now... \n#bot #doomsday #NEO #asteroids")
| <commit_before>
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth.".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km.".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now...")
<commit_msg>Update 0.4.2
- Added hashtags<commit_after>
import tweepy
from secrets import *
from voyager_distance import get_distance
from NEO_flyby import NEO
# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
for distance in get_distance():
try:
voyager_message = "Voyager I is now {:,} km from Earth. \nVoyager II is now {:,} km from Earth. \n#bot #space #voyager".format(*distance)
api.update_status(voyager_message)
except IndexError:
pass
for data in NEO().flyby_data():
try:
new_neo = "Today's NEO: Object: {} at {}. Estimated diameter: {} - {} km. \n#bot #NEO #asteroids".format(*data)
api.update_status(new_neo)
except IndexError:
api.update_status("No near-Earth objects for today! We're save! ...at least for now... \n#bot #doomsday #NEO #asteroids")
|
734903c777fb237509c21a988f79318ec14e997d | st2api/st2api/controllers/sensors.py | st2api/st2api/controllers/sensors.py | import six
from pecan import abort
from pecan.rest import RestController
from mongoengine import ValidationError
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.base import jsexpose
from st2common.models.api.reactor import SensorTypeAPI
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(RestController):
@jsexpose(str)
def get_one(self, id):
"""
Get sensortype by id.
Handle:
GET /sensortype/1
"""
LOG.info('GET /sensortype/ with id=%s', id)
try:
sensor_type_db = SensorType.get_by_id(id)
except (ValueError, ValidationError):
LOG.exception('Database lookup for id="%s" resulted in exception.', id)
abort(http_client.NOT_FOUND)
return
sensor_type_api = SensorTypeAPI.from_model(sensor_type_db)
LOG.debug('GET /sensortype/ with id=%s, client_result=%s', id, sensor_type_api)
return sensor_type_api
@jsexpose(str)
def get_all(self, **kw):
"""
List all sensor types.
Handles requests:
GET /sensortypes/
"""
LOG.info('GET all /sensortypes/ with filters=%s', kw)
sensor_type_dbs = SensorType.get_all(**kw)
sensor_type_apis = [SensorTypeAPI.from_model(sensor_type_db) for sensor_type_db
in sensor_type_dbs]
return sensor_type_apis
| import six
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.api.reactor import SensorTypeAPI
from st2api.controllers.resource import ResourceController
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(ResourceController):
model = SensorTypeAPI
access = SensorType
supported_filters = {
'name': 'name',
'pack': 'content_pack'
}
options = {
'sort': ['content_pack', 'name']
}
| Use ResourceController instead of duplicating logic. | Use ResourceController instead of duplicating logic.
| Python | apache-2.0 | pinterb/st2,armab/st2,peak6/st2,pixelrebel/st2,Plexxi/st2,armab/st2,Plexxi/st2,punalpatel/st2,grengojbo/st2,jtopjian/st2,jtopjian/st2,StackStorm/st2,alfasin/st2,dennybaa/st2,lakshmi-kannan/st2,lakshmi-kannan/st2,pinterb/st2,grengojbo/st2,nzlosh/st2,StackStorm/st2,StackStorm/st2,pixelrebel/st2,alfasin/st2,emedvedev/st2,pixelrebel/st2,Itxaka/st2,Itxaka/st2,jtopjian/st2,nzlosh/st2,tonybaloney/st2,emedvedev/st2,punalpatel/st2,peak6/st2,tonybaloney/st2,tonybaloney/st2,Plexxi/st2,dennybaa/st2,emedvedev/st2,peak6/st2,StackStorm/st2,nzlosh/st2,pinterb/st2,armab/st2,nzlosh/st2,Itxaka/st2,grengojbo/st2,punalpatel/st2,Plexxi/st2,dennybaa/st2,lakshmi-kannan/st2,alfasin/st2 | import six
from pecan import abort
from pecan.rest import RestController
from mongoengine import ValidationError
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.base import jsexpose
from st2common.models.api.reactor import SensorTypeAPI
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(RestController):
@jsexpose(str)
def get_one(self, id):
"""
Get sensortype by id.
Handle:
GET /sensortype/1
"""
LOG.info('GET /sensortype/ with id=%s', id)
try:
sensor_type_db = SensorType.get_by_id(id)
except (ValueError, ValidationError):
LOG.exception('Database lookup for id="%s" resulted in exception.', id)
abort(http_client.NOT_FOUND)
return
sensor_type_api = SensorTypeAPI.from_model(sensor_type_db)
LOG.debug('GET /sensortype/ with id=%s, client_result=%s', id, sensor_type_api)
return sensor_type_api
@jsexpose(str)
def get_all(self, **kw):
"""
List all sensor types.
Handles requests:
GET /sensortypes/
"""
LOG.info('GET all /sensortypes/ with filters=%s', kw)
sensor_type_dbs = SensorType.get_all(**kw)
sensor_type_apis = [SensorTypeAPI.from_model(sensor_type_db) for sensor_type_db
in sensor_type_dbs]
return sensor_type_apis
Use ResourceController instead of duplicating logic. | import six
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.api.reactor import SensorTypeAPI
from st2api.controllers.resource import ResourceController
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(ResourceController):
model = SensorTypeAPI
access = SensorType
supported_filters = {
'name': 'name',
'pack': 'content_pack'
}
options = {
'sort': ['content_pack', 'name']
}
| <commit_before>import six
from pecan import abort
from pecan.rest import RestController
from mongoengine import ValidationError
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.base import jsexpose
from st2common.models.api.reactor import SensorTypeAPI
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(RestController):
@jsexpose(str)
def get_one(self, id):
"""
Get sensortype by id.
Handle:
GET /sensortype/1
"""
LOG.info('GET /sensortype/ with id=%s', id)
try:
sensor_type_db = SensorType.get_by_id(id)
except (ValueError, ValidationError):
LOG.exception('Database lookup for id="%s" resulted in exception.', id)
abort(http_client.NOT_FOUND)
return
sensor_type_api = SensorTypeAPI.from_model(sensor_type_db)
LOG.debug('GET /sensortype/ with id=%s, client_result=%s', id, sensor_type_api)
return sensor_type_api
@jsexpose(str)
def get_all(self, **kw):
"""
List all sensor types.
Handles requests:
GET /sensortypes/
"""
LOG.info('GET all /sensortypes/ with filters=%s', kw)
sensor_type_dbs = SensorType.get_all(**kw)
sensor_type_apis = [SensorTypeAPI.from_model(sensor_type_db) for sensor_type_db
in sensor_type_dbs]
return sensor_type_apis
<commit_msg>Use ResourceController instead of duplicating logic.<commit_after> | import six
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.api.reactor import SensorTypeAPI
from st2api.controllers.resource import ResourceController
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(ResourceController):
model = SensorTypeAPI
access = SensorType
supported_filters = {
'name': 'name',
'pack': 'content_pack'
}
options = {
'sort': ['content_pack', 'name']
}
| import six
from pecan import abort
from pecan.rest import RestController
from mongoengine import ValidationError
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.base import jsexpose
from st2common.models.api.reactor import SensorTypeAPI
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(RestController):
@jsexpose(str)
def get_one(self, id):
"""
Get sensortype by id.
Handle:
GET /sensortype/1
"""
LOG.info('GET /sensortype/ with id=%s', id)
try:
sensor_type_db = SensorType.get_by_id(id)
except (ValueError, ValidationError):
LOG.exception('Database lookup for id="%s" resulted in exception.', id)
abort(http_client.NOT_FOUND)
return
sensor_type_api = SensorTypeAPI.from_model(sensor_type_db)
LOG.debug('GET /sensortype/ with id=%s, client_result=%s', id, sensor_type_api)
return sensor_type_api
@jsexpose(str)
def get_all(self, **kw):
"""
List all sensor types.
Handles requests:
GET /sensortypes/
"""
LOG.info('GET all /sensortypes/ with filters=%s', kw)
sensor_type_dbs = SensorType.get_all(**kw)
sensor_type_apis = [SensorTypeAPI.from_model(sensor_type_db) for sensor_type_db
in sensor_type_dbs]
return sensor_type_apis
Use ResourceController instead of duplicating logic.import six
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.api.reactor import SensorTypeAPI
from st2api.controllers.resource import ResourceController
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(ResourceController):
model = SensorTypeAPI
access = SensorType
supported_filters = {
'name': 'name',
'pack': 'content_pack'
}
options = {
'sort': ['content_pack', 'name']
}
| <commit_before>import six
from pecan import abort
from pecan.rest import RestController
from mongoengine import ValidationError
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.base import jsexpose
from st2common.models.api.reactor import SensorTypeAPI
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(RestController):
@jsexpose(str)
def get_one(self, id):
"""
Get sensortype by id.
Handle:
GET /sensortype/1
"""
LOG.info('GET /sensortype/ with id=%s', id)
try:
sensor_type_db = SensorType.get_by_id(id)
except (ValueError, ValidationError):
LOG.exception('Database lookup for id="%s" resulted in exception.', id)
abort(http_client.NOT_FOUND)
return
sensor_type_api = SensorTypeAPI.from_model(sensor_type_db)
LOG.debug('GET /sensortype/ with id=%s, client_result=%s', id, sensor_type_api)
return sensor_type_api
@jsexpose(str)
def get_all(self, **kw):
"""
List all sensor types.
Handles requests:
GET /sensortypes/
"""
LOG.info('GET all /sensortypes/ with filters=%s', kw)
sensor_type_dbs = SensorType.get_all(**kw)
sensor_type_apis = [SensorTypeAPI.from_model(sensor_type_db) for sensor_type_db
in sensor_type_dbs]
return sensor_type_apis
<commit_msg>Use ResourceController instead of duplicating logic.<commit_after>import six
from st2common import log as logging
from st2common.persistence.reactor import SensorType
from st2common.models.api.reactor import SensorTypeAPI
from st2api.controllers.resource import ResourceController
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class SensorTypeController(ResourceController):
model = SensorTypeAPI
access = SensorType
supported_filters = {
'name': 'name',
'pack': 'content_pack'
}
options = {
'sort': ['content_pack', 'name']
}
|
4dc2d5710f5f34a0611c8d38a84ee3c5ecf79463 | uliweb/contrib/rbac/tags.py | uliweb/contrib/rbac/tags.py | from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, "%s"):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, "%s"):\n' % self.name]
| from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, %s):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, %s):\n' % self.name]
| Change role and permission tag parameter format, no need "" | Change role and permission tag parameter format, no need ""
| Python | bsd-2-clause | wwfifi/uliweb,wwfifi/uliweb,wwfifi/uliweb,limodou/uliweb,limodou/uliweb,wwfifi/uliweb,limodou/uliweb,limodou/uliweb | from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, "%s"):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, "%s"):\n' % self.name]
Change role and permission tag parameter format, no need "" | from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, %s):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, %s):\n' % self.name]
| <commit_before>from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, "%s"):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, "%s"):\n' % self.name]
<commit_msg>Change role and permission tag parameter format, no need ""<commit_after> | from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, %s):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, %s):\n' % self.name]
| from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, "%s"):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, "%s"):\n' % self.name]
Change role and permission tag parameter format, no need ""from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, %s):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, %s):\n' % self.name]
| <commit_before>from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, "%s"):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, "%s"):\n' % self.name]
<commit_msg>Change role and permission tag parameter format, no need ""<commit_after>from uliweb.core.template import BaseBlockNode
from uliweb import functions
class PermissionNode(BaseBlockNode):
def __init__(self, name='', content=None):
super(PermissionNode, self).__init__(name, content)
self.nodes = ['if functions.has_permission(request.user, %s):\n' % self.name]
def end(self):
self.nodes.append('pass\n')
class RoleNode(PermissionNode):
def __init__(self, name='', content=None):
super(RoleNode, self).__init__(name, content)
self.nodes = ['if functions.has_role(request.user, %s):\n' % self.name]
|
dfe475837d7942f05c9366819ee57ccc8aa0e58a | dmp/__init__.py | dmp/__init__.py | """
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp.dmp
import rest.rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
| """
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp
import rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
| Change to try and improve importability - Undone | Change to try and improve importability - Undone
| Python | apache-2.0 | Multiscale-Genomics/mg-dm-api,Multiscale-Genomics/mg-dm-api | """
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp.dmp
import rest.rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
Change to try and improve importability - Undone | """
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp
import rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
| <commit_before>"""
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp.dmp
import rest.rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
<commit_msg>Change to try and improve importability - Undone<commit_after> | """
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp
import rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
| """
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp.dmp
import rest.rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
Change to try and improve importability - Undone"""
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp
import rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
| <commit_before>"""
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp.dmp
import rest.rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
<commit_msg>Change to try and improve importability - Undone<commit_after>"""
.. Copyright 2016 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dmp
import rest
__author__ = 'Mark McDowall'
__version__ = '0.0'
__license__ = 'Apache 2.0'
|
d0b40c90bd5af1ba9ef0d617c10700566d4e3775 | tests/unit/directory/test_directory.py | tests/unit/directory/test_directory.py | """Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main() | """Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
def setUp(self):
self.fake_path = os.path.abspath("hello-world-dir")
return
if __name__ == "__main__":
unittest.main() | Add TestDirectory.setUp() to the directory package's test file | Add TestDirectory.setUp() to the directory package's test file
| Python | mit | SizzlingVortex/classyfd | """Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()Add TestDirectory.setUp() to the directory package's test file | """Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
def setUp(self):
self.fake_path = os.path.abspath("hello-world-dir")
return
if __name__ == "__main__":
unittest.main() | <commit_before>"""Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()<commit_msg>Add TestDirectory.setUp() to the directory package's test file<commit_after> | """Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
def setUp(self):
self.fake_path = os.path.abspath("hello-world-dir")
return
if __name__ == "__main__":
unittest.main() | """Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()Add TestDirectory.setUp() to the directory package's test file"""Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
def setUp(self):
self.fake_path = os.path.abspath("hello-world-dir")
return
if __name__ == "__main__":
unittest.main() | <commit_before>"""Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()<commit_msg>Add TestDirectory.setUp() to the directory package's test file<commit_after>"""Contains the unit tests for the inner directory package"""
import unittest
class TestDirectory(unittest.TestCase):
def setUp(self):
self.fake_path = os.path.abspath("hello-world-dir")
return
if __name__ == "__main__":
unittest.main() |
24c6b759c1e8898946cdae591bce236e3ddbc2d8 | topStocks.py | topStocks.py | """Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
| """Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
else:
percentage = str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
| Make sure "percentage" is a string before concatenating it. | Make sure "percentage" is a string before concatenating it.
| Python | mit | trswany/topStocks | """Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
Make sure "percentage" is a string before concatenating it. | """Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
else:
percentage = str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
| <commit_before>"""Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
<commit_msg>Make sure "percentage" is a string before concatenating it.<commit_after> | """Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
else:
percentage = str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
| """Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
Make sure "percentage" is a string before concatenating it."""Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
else:
percentage = str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
| <commit_before>"""Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
<commit_msg>Make sure "percentage" is a string before concatenating it.<commit_after>"""Find top stocks and post them to Twitter."""
import sys
import tweetPoster
import stockPrices
from stockList import getStockList
import time
def main():
# Get the list of stock symobls
currentStockList = getStockList()
# Get the stock prices
oldStockPrices = stockPrices.getStockPrices(currentStockList)
# Wait a while for the stock prices to change
print("Sleeping...")
time.sleep(30)
print("Done sleeping.")
# Get the new stock prices
currentStockPrices = stockPrices.getStockPrices(currentStockList)
# Find the fastest-changing stocks
topStockPrices = stockPrices.getFastestChangingPrices(currentStockPrices, oldStockPrices)
# Format the tweet text
tweetText = ""
for stockPrice in topStockPrices:
symbol = stockPrice[0]
percentage = round(float(stockPrice[1]), 2)
if percentage > 0:
percentage = '+' + str(percentage)
else:
percentage = str(percentage)
tweetText = tweetText + '#' + symbol + " : " + percentage + '%, '
# Post a tweet of the top stocks
print tweetText
tweetPoster.postTweet(tweetText)
if __name__ == '__main__':
sys.exit(main())
|
23c699e1be6c7d779491b62811913a0c73b45a39 | utilities/test_parse_vcf.py | utilities/test_parse_vcf.py | import pytest
import parse_vcf as pv
def test_openfile():
assert pv.openfile('test_parse_vcf.py')[0] == True
| import pytest
import parse_vcf as pv
def test_openfile():
# The real input file is called: 001.vcfmod
assert pv.openfile('001.vcfmod')[0] == True
| Use one of the real data files in test | Use one of the real data files in test
| Python | mit | daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various | import pytest
import parse_vcf as pv
def test_openfile():
assert pv.openfile('test_parse_vcf.py')[0] == True
Use one of the real data files in test | import pytest
import parse_vcf as pv
def test_openfile():
# The real input file is called: 001.vcfmod
assert pv.openfile('001.vcfmod')[0] == True
| <commit_before>import pytest
import parse_vcf as pv
def test_openfile():
assert pv.openfile('test_parse_vcf.py')[0] == True
<commit_msg>Use one of the real data files in test<commit_after> | import pytest
import parse_vcf as pv
def test_openfile():
# The real input file is called: 001.vcfmod
assert pv.openfile('001.vcfmod')[0] == True
| import pytest
import parse_vcf as pv
def test_openfile():
assert pv.openfile('test_parse_vcf.py')[0] == True
Use one of the real data files in testimport pytest
import parse_vcf as pv
def test_openfile():
# The real input file is called: 001.vcfmod
assert pv.openfile('001.vcfmod')[0] == True
| <commit_before>import pytest
import parse_vcf as pv
def test_openfile():
assert pv.openfile('test_parse_vcf.py')[0] == True
<commit_msg>Use one of the real data files in test<commit_after>import pytest
import parse_vcf as pv
def test_openfile():
# The real input file is called: 001.vcfmod
assert pv.openfile('001.vcfmod')[0] == True
|
2e5c0b1507dcfac46c380102ff338a4be9f25d97 | tests/sentry/auth/providers/test_oauth2.py | tests/sentry/auth/providers/test_oauth2.py | from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'access_token': 'access_token',
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
| from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'data': {'access_token': 'access_token'},
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
| Correct identity data in test | Correct identity data in test
| Python | bsd-3-clause | daevaorn/sentry,ewdurbin/sentry,mvaled/sentry,daevaorn/sentry,imankulov/sentry,llonchj/sentry,looker/sentry,BayanGroup/sentry,felixbuenemann/sentry,jean/sentry,vperron/sentry,BuildingLink/sentry,argonemyth/sentry,mvaled/sentry,boneyao/sentry,nicholasserra/sentry,gencer/sentry,JTCunning/sentry,imankulov/sentry,JamesMura/sentry,fotinakis/sentry,beeftornado/sentry,gencer/sentry,TedaLIEz/sentry,kevinlondon/sentry,ngonzalvez/sentry,JamesMura/sentry,ngonzalvez/sentry,1tush/sentry,songyi199111/sentry,songyi199111/sentry,zenefits/sentry,mvaled/sentry,JackDanger/sentry,boneyao/sentry,korealerts1/sentry,ewdurbin/sentry,wong2/sentry,TedaLIEz/sentry,fotinakis/sentry,boneyao/sentry,BayanGroup/sentry,zenefits/sentry,pauloschilling/sentry,BuildingLink/sentry,gg7/sentry,1tush/sentry,kevinlondon/sentry,pauloschilling/sentry,JamesMura/sentry,looker/sentry,BuildingLink/sentry,BuildingLink/sentry,songyi199111/sentry,gencer/sentry,fuziontech/sentry,mitsuhiko/sentry,ngonzalvez/sentry,nicholasserra/sentry,looker/sentry,drcapulet/sentry,daevaorn/sentry,mvaled/sentry,vperron/sentry,felixbuenemann/sentry,1tush/sentry,wong2/sentry,JTCunning/sentry,llonchj/sentry,ifduyue/sentry,argonemyth/sentry,Kryz/sentry,ifduyue/sentry,Natim/sentry,wujuguang/sentry,JackDanger/sentry,fuziontech/sentry,jean/sentry,gg7/sentry,hongliang5623/sentry,Kryz/sentry,gg7/sentry,mvaled/sentry,wujuguang/sentry,alexm92/sentry,imankulov/sentry,gencer/sentry,wujuguang/sentry,JackDanger/sentry,alexm92/sentry,vperron/sentry,argonemyth/sentry,pauloschilling/sentry,BuildingLink/sentry,Natim/sentry,fuziontech/sentry,fotinakis/sentry,ifduyue/sentry,kevinastone/sentry,beeftornado/sentry,daevaorn/sentry,wong2/sentry,Natim/sentry,nicholasserra/sentry,kevinastone/sentry,gencer/sentry,mvaled/sentry,beeftornado/sentry,BayanGroup/sentry,hongliang5623/sentry,JTCunning/sentry,zenefits/sentry,looker/sentry,ifduyue/sentry,drcapulet/sentry,jean/sentry,mitsuhiko/sentry,felixbuenemann/sentry,Kryz/sentry,zenefits/sentr
y,jean/sentry,TedaLIEz/sentry,zenefits/sentry,korealerts1/sentry,alexm92/sentry,kevinastone/sentry,jean/sentry,JamesMura/sentry,ewdurbin/sentry,kevinlondon/sentry,JamesMura/sentry,korealerts1/sentry,fotinakis/sentry,hongliang5623/sentry,drcapulet/sentry,looker/sentry,llonchj/sentry,ifduyue/sentry | from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'access_token': 'access_token',
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
Correct identity data in test | from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'data': {'access_token': 'access_token'},
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
| <commit_before>from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'access_token': 'access_token',
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
<commit_msg>Correct identity data in test<commit_after> | from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'data': {'access_token': 'access_token'},
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
| from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'access_token': 'access_token',
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
Correct identity data in testfrom __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'data': {'access_token': 'access_token'},
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
| <commit_before>from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'access_token': 'access_token',
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
<commit_msg>Correct identity data in test<commit_after>from __future__ import absolute_import, print_function
import pytest
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import OAuth2Provider
from sentry.models import AuthIdentity, AuthProvider
from sentry.testutils import TestCase
class OAuth2ProviderTest(TestCase):
def setUp(self):
self.org = self.create_organization(owner=self.user)
self.user = self.create_user('foo@example.com')
self.auth_provider = AuthProvider.objects.create(
provider='oauth2',
organization=self.org,
)
self.provider = self.get_provider()
super(OAuth2ProviderTest, self).setUp()
def get_provider(self):
self.provider = OAuth2Provider(
key=self.auth_provider.provider
)
def test_refresh_identity_without_refresh_token(self):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=self.user,
data={
'data': {'access_token': 'access_token'},
}
)
provider = OAuth2Provider(key=self.auth_provider.provider)
with pytest.raises(IdentityNotValid):
provider.refresh_identity(auth_identity)
|
115c9343e4aa6bf39dd2038a53a32fb5695fce2b | tools/pygments_k3/pygments_k3/lexer.py | tools/pygments_k3/pygments_k3/lexer.py | from pygments.token import Keyword, Text, Token
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
]
}
tokens = {
'root': [
(r'//.*?$', Token.Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'.*\n', Text),
],
}
| from pygments.token import Comment, Keyword, Literal, Operator, Text
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
"true", "false"
]
}
tokens = {
'root': [
(r'//.*?$', Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'[+\-*/<>=!&|\\():;,]+', Operator),
(r'[0-9]+(\.[0-9]*)?', Literal.Number),
(r'"[^"]*"', Literal.String),
(r'\b([_a-zA-Z][_a-zA-Z0-9]*)\b', Text),
(r'\s+', Text.Whitespace)
],
}
| Add more syntactic construct lexing. | Add more syntactic construct lexing.
| Python | apache-2.0 | DaMSL/K3,DaMSL/K3,yliu120/K3 | from pygments.token import Keyword, Text, Token
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
]
}
tokens = {
'root': [
(r'//.*?$', Token.Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'.*\n', Text),
],
}
Add more syntactic construct lexing. | from pygments.token import Comment, Keyword, Literal, Operator, Text
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
"true", "false"
]
}
tokens = {
'root': [
(r'//.*?$', Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'[+\-*/<>=!&|\\():;,]+', Operator),
(r'[0-9]+(\.[0-9]*)?', Literal.Number),
(r'"[^"]*"', Literal.String),
(r'\b([_a-zA-Z][_a-zA-Z0-9]*)\b', Text),
(r'\s+', Text.Whitespace)
],
}
| <commit_before>from pygments.token import Keyword, Text, Token
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
]
}
tokens = {
'root': [
(r'//.*?$', Token.Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'.*\n', Text),
],
}
<commit_msg>Add more syntactic construct lexing.<commit_after> | from pygments.token import Comment, Keyword, Literal, Operator, Text
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
"true", "false"
]
}
tokens = {
'root': [
(r'//.*?$', Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'[+\-*/<>=!&|\\():;,]+', Operator),
(r'[0-9]+(\.[0-9]*)?', Literal.Number),
(r'"[^"]*"', Literal.String),
(r'\b([_a-zA-Z][_a-zA-Z0-9]*)\b', Text),
(r'\s+', Text.Whitespace)
],
}
| from pygments.token import Keyword, Text, Token
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
]
}
tokens = {
'root': [
(r'//.*?$', Token.Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'.*\n', Text),
],
}
Add more syntactic construct lexing.from pygments.token import Comment, Keyword, Literal, Operator, Text
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
"true", "false"
]
}
tokens = {
'root': [
(r'//.*?$', Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'[+\-*/<>=!&|\\():;,]+', Operator),
(r'[0-9]+(\.[0-9]*)?', Literal.Number),
(r'"[^"]*"', Literal.String),
(r'\b([_a-zA-Z][_a-zA-Z0-9]*)\b', Text),
(r'\s+', Text.Whitespace)
],
}
| <commit_before>from pygments.token import Keyword, Text, Token
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
]
}
tokens = {
'root': [
(r'//.*?$', Token.Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'.*\n', Text),
],
}
<commit_msg>Add more syntactic construct lexing.<commit_after>from pygments.token import Comment, Keyword, Literal, Operator, Text
from pygments.lexer import RegexLexer
class K3Lexer(RegexLexer):
name = 'K3'
aliases = ['k3']
filenames = ['*.k3']
keywords = {
'preprocessor': "include",
"declaration": [
"annotation", "declare", "feed", "provides", "requires", "source", "trigger"
],
"type": ["bool", "int", "real", "string", "option", "ind", "collection"],
"expression": [
"if", "then", "else", "let", "in", "case", "of", "bind", "as", "some", "none",
"true", "false"
]
}
tokens = {
'root': [
(r'//.*?$', Comment),
(r'\b(' + r'|'.join(keywords["preprocessor"]) + r')\s*\b(?!\.)', Keyword.Namespace),
(r'\b(' + r'|'.join(keywords["declaration"]) + r')\s*\b(?!\.)', Keyword.Declaration),
(r'\b(' + r'|'.join(keywords["type"]) + r')\s*\b(?!\.)', Keyword.Type),
(r'\b(' + r'|'.join(keywords["expression"]) + r')\s*\b(?!\.)', Keyword.Reserved),
(r'[+\-*/<>=!&|\\():;,]+', Operator),
(r'[0-9]+(\.[0-9]*)?', Literal.Number),
(r'"[^"]*"', Literal.String),
(r'\b([_a-zA-Z][_a-zA-Z0-9]*)\b', Text),
(r'\s+', Text.Whitespace)
],
}
|
b20cfef1e54d9f22769f6d0ec6ae06031bf86ec3 | tests/CrawlerProcess/asyncio_deferred_signal.py | tests/CrawlerProcess/asyncio_deferred_signal.py | import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
    """Item pipeline used to exercise coroutine-backed signal handlers.

    ``open_spider`` returns a Deferred wrapping a coroutine, and
    ``process_item`` upper-cases the scraped URL so the test can tell
    the pipeline actually ran.
    """

    async def _open_spider(self, spider):
        # Coroutine counterpart of open_spider; the short sleep forces the
        # crawl to actually wait on the asyncio event loop.
        spider.logger.info("async pipeline opened!")
        await asyncio.sleep(0.1)

    def open_spider(self, spider):
        # Fix: dropped the unused `loop = asyncio.get_event_loop()` line —
        # its result was never used, and calling get_event_loop() outside a
        # running loop is deprecated on modern Python.
        return deferred_from_coro(self._open_spider(spider))

    def process_item(self, item, spider):
        # Upper-case the URL so output items are distinguishable from input.
        return {"url": item["url"].upper()}
class UrlSpider(Spider):
    """Minimal spider: fetches a single data URI and emits its URL as an item."""

    name = "url_spider"
    start_urls = ["data:,"]
    custom_settings = {"ITEM_PIPELINES": {UppercasePipeline: 100}}

    def parse(self, response):
        # Emit one item per response; the pipeline upper-cases the URL.
        item = {"url": response.url}
        yield item
if __name__ == "__main__":
    # Optional first CLI argument selects the asyncio event loop class.
    ASYNCIO_EVENT_LOOP = sys.argv[1] if len(sys.argv) > 1 else None
    settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
    }
    process = CrawlerProcess(settings=settings)
    process.crawl(UrlSpider)
    process.start()
| import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
    """Pipeline for the test: opens via an async coroutine and upper-cases URLs."""

    def open_spider(self, spider):
        # Bridge the coroutine into the Deferred-based pipeline API.
        return deferred_from_coro(self._open_spider(spider))

    async def _open_spider(self, spider):
        spider.logger.info("async pipeline opened!")
        await asyncio.sleep(0.1)

    def process_item(self, item, spider):
        uppercased = item["url"].upper()
        return {"url": uppercased}
class UrlSpider(Spider):
    """Minimal spider: fetches a single data URI and emits its URL as an item."""

    name = "url_spider"
    start_urls = ["data:,"]
    custom_settings = {"ITEM_PIPELINES": {UppercasePipeline: 100}}

    def parse(self, response):
        # Emit one item per response; the pipeline upper-cases the URL.
        item = {"url": response.url}
        yield item
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
| Remove unnecessary line from test | Remove unnecessary line from test
| Python | bsd-3-clause | dangra/scrapy,pablohoffman/scrapy,scrapy/scrapy,pablohoffman/scrapy,elacuesta/scrapy,pawelmhm/scrapy,dangra/scrapy,pawelmhm/scrapy,elacuesta/scrapy,elacuesta/scrapy,pawelmhm/scrapy,pablohoffman/scrapy,dangra/scrapy,scrapy/scrapy,scrapy/scrapy | import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
loop = asyncio.get_event_loop()
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item, spider):
return {"url": item["url"].upper()}
class UrlSpider(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
Remove unnecessary line from test | import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item, spider):
return {"url": item["url"].upper()}
class UrlSpider(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
| <commit_before>import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
loop = asyncio.get_event_loop()
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item, spider):
return {"url": item["url"].upper()}
class UrlSpider(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
<commit_msg>Remove unnecessary line from test<commit_after> | import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item, spider):
return {"url": item["url"].upper()}
class UrlSpider(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
| import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
loop = asyncio.get_event_loop()
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item, spider):
return {"url": item["url"].upper()}
class UrlSpider(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
Remove unnecessary line from testimport asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item, spider):
return {"url": item["url"].upper()}
class UrlSpider(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
| <commit_before>import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
loop = asyncio.get_event_loop()
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item, spider):
return {"url": item["url"].upper()}
class UrlSpider(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
<commit_msg>Remove unnecessary line from test<commit_after>import asyncio
import sys
from scrapy import Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.defer import deferred_from_coro
from twisted.internet.defer import Deferred
class UppercasePipeline:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item, spider):
return {"url": item["url"].upper()}
class UrlSpider(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
})
process.crawl(UrlSpider)
process.start()
|
819e34fb8cd60a25b7796508f72a1e9ba00b5faf | incuna_test_utils/factories/user.py | incuna_test_utils/factories/user.py | import factory
from django.contrib.auth import get_user_model
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = get_user_model()
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
| import factory
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError: # Django 1.4
from django.contrib.auth.models import User
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
| Fix UserFactory for django 1.4 | Fix UserFactory for django 1.4
| Python | bsd-2-clause | incuna/incuna-test-utils,incuna/incuna-test-utils | import factory
from django.contrib.auth import get_user_model
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = get_user_model()
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
Fix UserFactory for django 1.4 | import factory
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError: # Django 1.4
from django.contrib.auth.models import User
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
| <commit_before>import factory
from django.contrib.auth import get_user_model
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = get_user_model()
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
<commit_msg>Fix UserFactory for django 1.4<commit_after> | import factory
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError: # Django 1.4
from django.contrib.auth.models import User
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
| import factory
from django.contrib.auth import get_user_model
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = get_user_model()
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
Fix UserFactory for django 1.4import factory
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError: # Django 1.4
from django.contrib.auth.models import User
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
| <commit_before>import factory
from django.contrib.auth import get_user_model
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = get_user_model()
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
<commit_msg>Fix UserFactory for django 1.4<commit_after>import factory
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError: # Django 1.4
from django.contrib.auth.models import User
class UserFactory(factory.DjangoModelFactory):
FACTORY_FOR = User
email = factory.Sequence(lambda i: 'email{}@example.com'.format(i))
name = factory.Sequence(lambda i: 'Test User {}'.format(i))
|
d953055801c8d618c70cea81e3e35684122c66a7 | setuptools/config/__init__.py | setuptools/config/__init__.py | """For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
| """For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning, stacklevel=2)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
| Add stacklevel=2 to make calling code clear | Add stacklevel=2 to make calling code clear | Python | mit | pypa/setuptools,pypa/setuptools,pypa/setuptools | """For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
Add stacklevel=2 to make calling code clear | """For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning, stacklevel=2)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
| <commit_before>"""For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
<commit_msg>Add stacklevel=2 to make calling code clear<commit_after> | """For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning, stacklevel=2)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
| """For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
Add stacklevel=2 to make calling code clear"""For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning, stacklevel=2)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
| <commit_before>"""For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
<commit_msg>Add stacklevel=2 to make calling code clear<commit_after>"""For backward compatibility, expose main functions from
``setuptools.config.setupcfg``
"""
import warnings
from functools import wraps
from textwrap import dedent
from typing import Callable, TypeVar, cast
from .._deprecation_warning import SetuptoolsDeprecationWarning
from . import setupcfg
Fn = TypeVar("Fn", bound=Callable)
__all__ = ('parse_configuration', 'read_configuration')
def _deprecation_notice(fn: Fn) -> Fn:
@wraps(fn)
def _wrapper(*args, **kwargs):
msg = f"""\
As setuptools moves its configuration towards `pyproject.toml`,
`{__name__}.{fn.__name__}` became deprecated.
For the time being, you can use the `{setupcfg.__name__}` module
to access a backward compatible API, but this module is provisional
and might be removed in the future.
"""
warnings.warn(dedent(msg), SetuptoolsDeprecationWarning, stacklevel=2)
return fn(*args, **kwargs)
return cast(Fn, _wrapper)
read_configuration = _deprecation_notice(setupcfg.read_configuration)
parse_configuration = _deprecation_notice(setupcfg.parse_configuration)
|
ef544497b3d595582e51f545c5250d1e92373577 | lexos/models/bct_model.py | lexos/models/bct_model.py | """This is a model to produce dendrograms of the dtm."""
import pandas as pd
from typing import NamedTuple, Optional
from lexos.models.base_model import BaseModel
from lexos.models.matrix_model import MatrixModel, IdTempLabelMap
from lexos.receivers.bct_receiver import BCTOption, BCTReceiver
class BCTTestOptions(NamedTuple):
"""A typed tuple to hold test options."""
doc_term_matrix: pd.DataFrame
front_end_option: BCTOption
id_temp_label_map: IdTempLabelMap
class BCTModel(BaseModel):
"""The BCTModel inherits from the BaseModel."""
def __init__(self, test_options: Optional[BCTTestOptions] = None):
"""Generate bootstrap consensus tree.
:param test_options: The input used in testing to override the
dynamically loaded option.
"""
super().__init__()
if test_options is not None:
self._test_dtm = test_options.doc_term_matrix
self._test_front_end_option = test_options.front_end_option
self._test_id_temp_label_map = test_options.id_temp_label_map
else:
self._test_dtm = None
self._test_front_end_option = None
self._test_id_temp_label_map = None
@property
def _doc_term_matrix(self) -> pd.DataFrame:
""":return: the document term matrix."""
return self._test_dtm if self._test_dtm is not None \
else MatrixModel().get_matrix()
@property
def _id_temp_label_map(self) -> IdTempLabelMap:
""":return: a map takes an id to temp labels."""
return self._test_id_temp_label_map \
if self._test_id_temp_label_map is not None \
else MatrixModel().get_id_temp_label_map()
@property
def _bct_option(self) -> BCTOption:
return self._test_front_end_option \
if self._test_front_end_option is not None \
else BCTReceiver().options_from_front_end()
| Set up the bct model | Set up the bct model
| Python | mit | WheatonCS/Lexos,WheatonCS/Lexos,WheatonCS/Lexos | Set up the bct model | """This is a model to produce dendrograms of the dtm."""
import pandas as pd
from typing import NamedTuple, Optional
from lexos.models.base_model import BaseModel
from lexos.models.matrix_model import MatrixModel, IdTempLabelMap
from lexos.receivers.bct_receiver import BCTOption, BCTReceiver
class BCTTestOptions(NamedTuple):
"""A typed tuple to hold test options."""
doc_term_matrix: pd.DataFrame
front_end_option: BCTOption
id_temp_label_map: IdTempLabelMap
class BCTModel(BaseModel):
"""The BCTModel inherits from the BaseModel."""
def __init__(self, test_options: Optional[BCTTestOptions] = None):
"""Generate bootstrap consensus tree.
:param test_options: The input used in testing to override the
dynamically loaded option.
"""
super().__init__()
if test_options is not None:
self._test_dtm = test_options.doc_term_matrix
self._test_front_end_option = test_options.front_end_option
self._test_id_temp_label_map = test_options.id_temp_label_map
else:
self._test_dtm = None
self._test_front_end_option = None
self._test_id_temp_label_map = None
@property
def _doc_term_matrix(self) -> pd.DataFrame:
""":return: the document term matrix."""
return self._test_dtm if self._test_dtm is not None \
else MatrixModel().get_matrix()
@property
def _id_temp_label_map(self) -> IdTempLabelMap:
""":return: a map takes an id to temp labels."""
return self._test_id_temp_label_map \
if self._test_id_temp_label_map is not None \
else MatrixModel().get_id_temp_label_map()
@property
def _bct_option(self) -> BCTOption:
return self._test_front_end_option \
if self._test_front_end_option is not None \
else BCTReceiver().options_from_front_end()
| <commit_before><commit_msg>Set up the bct model<commit_after> | """This is a model to produce dendrograms of the dtm."""
import pandas as pd
from typing import NamedTuple, Optional
from lexos.models.base_model import BaseModel
from lexos.models.matrix_model import MatrixModel, IdTempLabelMap
from lexos.receivers.bct_receiver import BCTOption, BCTReceiver
class BCTTestOptions(NamedTuple):
"""A typed tuple to hold test options."""
doc_term_matrix: pd.DataFrame
front_end_option: BCTOption
id_temp_label_map: IdTempLabelMap
class BCTModel(BaseModel):
"""The BCTModel inherits from the BaseModel."""
def __init__(self, test_options: Optional[BCTTestOptions] = None):
"""Generate bootstrap consensus tree.
:param test_options: The input used in testing to override the
dynamically loaded option.
"""
super().__init__()
if test_options is not None:
self._test_dtm = test_options.doc_term_matrix
self._test_front_end_option = test_options.front_end_option
self._test_id_temp_label_map = test_options.id_temp_label_map
else:
self._test_dtm = None
self._test_front_end_option = None
self._test_id_temp_label_map = None
@property
def _doc_term_matrix(self) -> pd.DataFrame:
""":return: the document term matrix."""
return self._test_dtm if self._test_dtm is not None \
else MatrixModel().get_matrix()
@property
def _id_temp_label_map(self) -> IdTempLabelMap:
""":return: a map takes an id to temp labels."""
return self._test_id_temp_label_map \
if self._test_id_temp_label_map is not None \
else MatrixModel().get_id_temp_label_map()
@property
def _bct_option(self) -> BCTOption:
return self._test_front_end_option \
if self._test_front_end_option is not None \
else BCTReceiver().options_from_front_end()
| Set up the bct model"""This is a model to produce dendrograms of the dtm."""
import pandas as pd
from typing import NamedTuple, Optional
from lexos.models.base_model import BaseModel
from lexos.models.matrix_model import MatrixModel, IdTempLabelMap
from lexos.receivers.bct_receiver import BCTOption, BCTReceiver
class BCTTestOptions(NamedTuple):
"""A typed tuple to hold test options."""
doc_term_matrix: pd.DataFrame
front_end_option: BCTOption
id_temp_label_map: IdTempLabelMap
class BCTModel(BaseModel):
"""The BCTModel inherits from the BaseModel."""
def __init__(self, test_options: Optional[BCTTestOptions] = None):
"""Generate bootstrap consensus tree.
:param test_options: The input used in testing to override the
dynamically loaded option.
"""
super().__init__()
if test_options is not None:
self._test_dtm = test_options.doc_term_matrix
self._test_front_end_option = test_options.front_end_option
self._test_id_temp_label_map = test_options.id_temp_label_map
else:
self._test_dtm = None
self._test_front_end_option = None
self._test_id_temp_label_map = None
@property
def _doc_term_matrix(self) -> pd.DataFrame:
""":return: the document term matrix."""
return self._test_dtm if self._test_dtm is not None \
else MatrixModel().get_matrix()
@property
def _id_temp_label_map(self) -> IdTempLabelMap:
""":return: a map takes an id to temp labels."""
return self._test_id_temp_label_map \
if self._test_id_temp_label_map is not None \
else MatrixModel().get_id_temp_label_map()
@property
def _bct_option(self) -> BCTOption:
return self._test_front_end_option \
if self._test_front_end_option is not None \
else BCTReceiver().options_from_front_end()
| <commit_before><commit_msg>Set up the bct model<commit_after>"""This is a model to produce dendrograms of the dtm."""
import pandas as pd
from typing import NamedTuple, Optional
from lexos.models.base_model import BaseModel
from lexos.models.matrix_model import MatrixModel, IdTempLabelMap
from lexos.receivers.bct_receiver import BCTOption, BCTReceiver
class BCTTestOptions(NamedTuple):
"""A typed tuple to hold test options."""
doc_term_matrix: pd.DataFrame
front_end_option: BCTOption
id_temp_label_map: IdTempLabelMap
class BCTModel(BaseModel):
"""The BCTModel inherits from the BaseModel."""
def __init__(self, test_options: Optional[BCTTestOptions] = None):
"""Generate bootstrap consensus tree.
:param test_options: The input used in testing to override the
dynamically loaded option.
"""
super().__init__()
if test_options is not None:
self._test_dtm = test_options.doc_term_matrix
self._test_front_end_option = test_options.front_end_option
self._test_id_temp_label_map = test_options.id_temp_label_map
else:
self._test_dtm = None
self._test_front_end_option = None
self._test_id_temp_label_map = None
@property
def _doc_term_matrix(self) -> pd.DataFrame:
""":return: the document term matrix."""
return self._test_dtm if self._test_dtm is not None \
else MatrixModel().get_matrix()
@property
def _id_temp_label_map(self) -> IdTempLabelMap:
""":return: a map takes an id to temp labels."""
return self._test_id_temp_label_map \
if self._test_id_temp_label_map is not None \
else MatrixModel().get_id_temp_label_map()
@property
def _bct_option(self) -> BCTOption:
return self._test_front_end_option \
if self._test_front_end_option is not None \
else BCTReceiver().options_from_front_end()
| |
67c68065fbe0ffffc6a72f1d67d23c583b03a6fe | neuroimaging/algorithms/tests/test_onesample.py | neuroimaging/algorithms/tests/test_onesample.py | from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.slow
@dec.data
def test_onesample1(self):
im1 = load_image('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im2 = load_image('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im3 = load_image('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
| from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.skipknownfailure
@dec.slow
@dec.data
def test_onesample1(self):
# FIXME: When we replace nipy's datasource with numpy's
# datasource, remove the string casting. _fullpath returns a
# 'path' object.
fp1 = repository._fullpath('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp1 = str(fp1)
im1 = load_image(fp1)
fp2 = repository._fullpath('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp2 = str(fp2)
im2 = load_image(fp2)
fp3 = repository._fullpath('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp3 = str(fp3)
im3 = load_image(fp3)
# FIXME: ImageSequenceIterator is not defined.
# ImageOneSample.__init__ fails.
# File "/Users/cburns/src/nipy-trunk/neuroimaging/algorithms/onesample.py", line 68, in __init__
# self.iterator = ImageSequenceIterator(input)
# NameError: global name 'ImageSequenceIterator' is not defined
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
| Update data file references. Skip known test failure to undefined image iterator. | Update data file references. Skip known test failure to undefined image iterator. | Python | bsd-3-clause | yarikoptic/NiPy-OLD,yarikoptic/NiPy-OLD | from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.slow
@dec.data
def test_onesample1(self):
im1 = load_image('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im2 = load_image('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im3 = load_image('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
Update data file references. Skip known test failure to undefined image iterator. | from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.skipknownfailure
@dec.slow
@dec.data
def test_onesample1(self):
# FIXME: When we replace nipy's datasource with numpy's
# datasource, remove the string casting. _fullpath returns a
# 'path' object.
fp1 = repository._fullpath('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp1 = str(fp1)
im1 = load_image(fp1)
fp2 = repository._fullpath('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp2 = str(fp2)
im2 = load_image(fp2)
fp3 = repository._fullpath('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp3 = str(fp3)
im3 = load_image(fp3)
# FIXME: ImageSequenceIterator is not defined.
# ImageOneSample.__init__ fails.
# File "/Users/cburns/src/nipy-trunk/neuroimaging/algorithms/onesample.py", line 68, in __init__
# self.iterator = ImageSequenceIterator(input)
# NameError: global name 'ImageSequenceIterator' is not defined
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
| <commit_before>from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.slow
@dec.data
def test_onesample1(self):
im1 = load_image('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im2 = load_image('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im3 = load_image('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
<commit_msg>Update data file references. Skip known test failure to undefined image iterator.<commit_after> | from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.skipknownfailure
@dec.slow
@dec.data
def test_onesample1(self):
# FIXME: When we replace nipy's datasource with numpy's
# datasource, remove the string casting. _fullpath returns a
# 'path' object.
fp1 = repository._fullpath('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp1 = str(fp1)
im1 = load_image(fp1)
fp2 = repository._fullpath('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp2 = str(fp2)
im2 = load_image(fp2)
fp3 = repository._fullpath('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp3 = str(fp3)
im3 = load_image(fp3)
# FIXME: ImageSequenceIterator is not defined.
# ImageOneSample.__init__ fails.
# File "/Users/cburns/src/nipy-trunk/neuroimaging/algorithms/onesample.py", line 68, in __init__
# self.iterator = ImageSequenceIterator(input)
# NameError: global name 'ImageSequenceIterator' is not defined
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
| from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.slow
@dec.data
def test_onesample1(self):
im1 = load_image('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im2 = load_image('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im3 = load_image('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
Update data file references. Skip known test failure to undefined image iterator.from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.skipknownfailure
@dec.slow
@dec.data
def test_onesample1(self):
# FIXME: When we replace nipy's datasource with numpy's
# datasource, remove the string casting. _fullpath returns a
# 'path' object.
fp1 = repository._fullpath('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp1 = str(fp1)
im1 = load_image(fp1)
fp2 = repository._fullpath('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp2 = str(fp2)
im2 = load_image(fp2)
fp3 = repository._fullpath('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp3 = str(fp3)
im3 = load_image(fp3)
# FIXME: ImageSequenceIterator is not defined.
# ImageOneSample.__init__ fails.
# File "/Users/cburns/src/nipy-trunk/neuroimaging/algorithms/onesample.py", line 68, in __init__
# self.iterator = ImageSequenceIterator(input)
# NameError: global name 'ImageSequenceIterator' is not defined
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
| <commit_before>from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.slow
@dec.data
def test_onesample1(self):
im1 = load_image('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im2 = load_image('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
im3 = load_image('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz',
repository)
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
<commit_msg>Update data file references. Skip known test failure to undefined image iterator.<commit_after>from neuroimaging.testing import *
from neuroimaging.algorithms.onesample import ImageOneSample
from neuroimaging.core.api import load_image
from neuroimaging.utils.tests.data import repository
class test_OneSample(TestCase):
@dec.skipknownfailure
@dec.slow
@dec.data
def test_onesample1(self):
# FIXME: When we replace nipy's datasource with numpy's
# datasource, remove the string casting. _fullpath returns a
# 'path' object.
fp1 = repository._fullpath('FIAC/fiac3/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp1 = str(fp1)
im1 = load_image(fp1)
fp2 = repository._fullpath('FIAC/fiac4/fonc3/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp2 = str(fp2)
im2 = load_image(fp2)
fp3 = repository._fullpath('FIAC/fiac5/fonc2/fsl/fmristat_run/contrasts/speaker/effect.nii.gz')
fp3 = str(fp3)
im3 = load_image(fp3)
# FIXME: ImageSequenceIterator is not defined.
# ImageOneSample.__init__ fails.
# File "/Users/cburns/src/nipy-trunk/neuroimaging/algorithms/onesample.py", line 68, in __init__
# self.iterator = ImageSequenceIterator(input)
# NameError: global name 'ImageSequenceIterator' is not defined
x = ImageOneSample([im1,im2,im3], clobber=True)
x.fit()
|
2611f7a4e4ef8fba25e45932c774a302299fd311 | tests/test_webob_middleware.py | tests/test_webob_middleware.py | # Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
| # Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, ")
resp.outputStream.write("there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
| Change test to show WebOb filters consume completely before resulting in a body | Change test to show WebOb filters consume completely before resulting in a body
| Python | apache-2.0 | jythontools/fireside,jythontools/fireside,jythontools/fireside | # Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
Change test to show WebOb filters consume completely before resulting in a body | # Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, ")
resp.outputStream.write("there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
| <commit_before># Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
<commit_msg>Change test to show WebOb filters consume completely before resulting in a body<commit_after> | # Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, ")
resp.outputStream.write("there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
| # Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
Change test to show WebOb filters consume completely before resulting in a body# Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, ")
resp.outputStream.write("there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
| <commit_before># Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
<commit_msg>Change test to show WebOb filters consume completely before resulting in a body<commit_after># Test various types of WebOb middleware
import sys
from fireside import WSGIFilter
from webob.dec import wsgify
from servlet_support import * # FIXME be explicit
from javax.servlet import FilterChain
@wsgify.middleware
def all_caps(req, app):
resp = req.get_response(app)
resp.body = resp.body.upper()
return resp
def test_webob_filter():
req_mock = RequestMock()
resp_mock = ResponseMock()
bridge = RequestBridge(req_mock, AdaptedInputStream(), AdaptedErrLog())
bridge_map = dict_builder(bridge.asMap)()
req_wrapper = bridge.asWrapper()
filter = WSGIFilter()
filter.init(ServletConfigMock(
{ "wsgi.handler": "test_webob_middleware.all_caps" }))
class UnitChain(FilterChain):
def doFilter(self, req, resp):
resp.outputStream.write("hi, ")
resp.outputStream.write("there!\n")
filter.doFilter(req_wrapper, resp_mock, UnitChain())
assert next(resp_mock.outputStream) == b"HI, THERE!\n"
assert resp_mock.headers == {'Content-Length': '11', 'Content-type': 'text/plain'}
assert resp_mock.my_status == (200, "OK")
|
1fb77715c4766e3e437970a048154a5a9fe1b2c8 | tools/httpdtest.py | tools/httpdtest.py | #! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
print(self.requestline)
for (key, header) in self.headers.items():
print("{} : {}".format(key, header))
print("---")
self.send_response(200, "OK")
self.end_headers()
self.wfile.write(b"Thanks for all the fish...")
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
| #! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import json
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
# Handle GET requests
def do_GET(self):
response = {}
# Capture the URL request
response["request"] = self.requestline
# Capture all header fields
headers = {}
for (key, header) in self.headers.items():
headers[key] = header
response["headers"] = headers
# Return the response
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response, sort_keys=True, indent=4).encode("utf-8"))
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
| Send back GET request in response | Send back GET request in response
The response to a GET request now returns the request string
and all header fields as a JSON encoded response.
| Python | bsd-3-clause | rolfmichelsen/Just4Fun | #! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
print(self.requestline)
for (key, header) in self.headers.items():
print("{} : {}".format(key, header))
print("---")
self.send_response(200, "OK")
self.end_headers()
self.wfile.write(b"Thanks for all the fish...")
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
Send back GET request in response
The response to a GET request now returns the request string
and all header fields as a JSON encoded response. | #! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import json
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
# Handle GET requests
def do_GET(self):
response = {}
# Capture the URL request
response["request"] = self.requestline
# Capture all header fields
headers = {}
for (key, header) in self.headers.items():
headers[key] = header
response["headers"] = headers
# Return the response
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response, sort_keys=True, indent=4).encode("utf-8"))
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
| <commit_before>#! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
print(self.requestline)
for (key, header) in self.headers.items():
print("{} : {}".format(key, header))
print("---")
self.send_response(200, "OK")
self.end_headers()
self.wfile.write(b"Thanks for all the fish...")
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
<commit_msg>Send back GET request in response
The response to a GET request now returns the request string
and all header fields as a JSON encoded response.<commit_after> | #! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import json
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
# Handle GET requests
def do_GET(self):
response = {}
# Capture the URL request
response["request"] = self.requestline
# Capture all header fields
headers = {}
for (key, header) in self.headers.items():
headers[key] = header
response["headers"] = headers
# Return the response
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response, sort_keys=True, indent=4).encode("utf-8"))
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
| #! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
print(self.requestline)
for (key, header) in self.headers.items():
print("{} : {}".format(key, header))
print("---")
self.send_response(200, "OK")
self.end_headers()
self.wfile.write(b"Thanks for all the fish...")
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
Send back GET request in response
The response to a GET request now returns the request string
and all header fields as a JSON encoded response.#! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import json
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
# Handle GET requests
def do_GET(self):
response = {}
# Capture the URL request
response["request"] = self.requestline
# Capture all header fields
headers = {}
for (key, header) in self.headers.items():
headers[key] = header
response["headers"] = headers
# Return the response
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response, sort_keys=True, indent=4).encode("utf-8"))
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
| <commit_before>#! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
print(self.requestline)
for (key, header) in self.headers.items():
print("{} : {}".format(key, header))
print("---")
self.send_response(200, "OK")
self.end_headers()
self.wfile.write(b"Thanks for all the fish...")
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
args = getArguments()
port = args.port
print("Starting HTTP server on port {}".format(port))
serverAddress = ("", port)
server = http.server.HTTPServer(serverAddress , RequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
<commit_msg>Send back GET request in response
The response to a GET request now returns the request string
and all header fields as a JSON encoded response.<commit_after>#! /bin/env python3
#
# Simple HTTP server for testing purposes.
import http.server
import json
import sys
from argparse import ArgumentParser
class RequestHandler(http.server.BaseHTTPRequestHandler):
# Handle GET requests
def do_GET(self):
response = {}
# Capture the URL request
response["request"] = self.requestline
# Capture all header fields
headers = {}
for (key, header) in self.headers.items():
headers[key] = header
response["headers"] = headers
# Return the response
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response, sort_keys=True, indent=4).encode("utf-8"))
def getArguments():
"""
Get command line arguments.
"""
description = "Simple HTTP server for testing."
epilog = '''
'''
argParser = ArgumentParser(description=description, epilog=epilog, fromfile_prefix_chars='@')
argParser.add_argument('port', type=int, nargs='?', help='Server port')
return argParser.parse_args()
def main():
    """Parse the port argument and serve HTTP requests forever."""
    options = getArguments()
    port = options.port
    print("Starting HTTP server on port {}".format(port))
    address = ("", port)
    httpd = http.server.HTTPServer(address, RequestHandler)
    httpd.serve_forever()


if __name__ == '__main__':
    main()
|
cfc734a1b349f2ec32cd51523f583dae6c820a8a | tweepy/__init__.py | tweepy/__init__.py | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
| # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1.0'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
    """Enable or disable low-level HTTP connection debug output.

    Args:
        enable: when False, debugging output is turned off (debuglevel 0).
            The original implementation ignored this flag and always set
            the level.
        level: verbosity assigned to HTTPConnection.debuglevel when enabled.
    """
    try:
        import http.client as httplib  # Python 3
    except ImportError:
        import httplib  # Python 2
    # Honor ``enable``: a debuglevel of 0 disables the output entirely.
    httplib.HTTPConnection.debuglevel = level if enable else 0
| Fix version to include 3 digits. | Fix version to include 3 digits.
| Python | mit | yared-bezum/tweepy,zhenv5/tweepy,vikasgorur/tweepy,sidewire/tweepy,arunxarun/tweepy,srimanthd/tweepy,edsu/tweepy,elijah513/tweepy,wjt/tweepy,awangga/tweepy,IsaacHaze/tweepy,techieshark/tweepy,vivek8943/tweepy,takeshineshiro/tweepy,obskyr/tweepy,hackebrot/tweepy,cogniteev/tweepy,tweepy/tweepy,iamjakob/tweepy,kylemanna/tweepy,truekonrads/tweepy,aleczadikian/tweepy,damchilly/tweepy,LikeABird/tweepy,robbiewoods05/tweepy,aganzha/tweepy,kcompher/tweepy,sa8/tweepy,bconnelly/tweepy,svven/tweepy,conversocial/tweepy,tsablic/tweepy,raymondethan/tweepy,thelostscientist/tweepy,atomicjets/tweepy,kskk02/tweepy,Choko256/tweepy,rudraksh125/tweepy,vishnugonela/tweepy,tuxos/tweepy | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
Fix version to include 3 digits. | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1.0'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
| <commit_before># Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
<commit_msg>Fix version to include 3 digits.<commit_after> | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1.0'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
| # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
Fix version to include 3 digits.# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1.0'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
| <commit_before># Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
<commit_msg>Fix version to include 3 digits.<commit_after># Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
"""
Tweepy Twitter API library
"""
__version__ = '3.1.0'
__author__ = 'Joshua Roesslein'
__license__ = 'MIT'
from tweepy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResults, ModelFactory, Category
from tweepy.error import TweepError
from tweepy.api import API
from tweepy.cache import Cache, MemoryCache, FileCache
from tweepy.auth import OAuthHandler, AppAuthHandler
from tweepy.streaming import Stream, StreamListener
from tweepy.cursor import Cursor
# Global, unauthenticated instance of API
api = API()
def debug(enable=True, level=1):
import httplib
httplib.HTTPConnection.debuglevel = level
|
44650a0b3d395b4201a039bd2f3eb916987dce8d | _grains/osqueryinfo.py | _grains/osqueryinfo.py | # -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.which(path)
break
break
return grains
| # -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
    '''
    Return osquery version in grain
    '''
    # Provides:
    #   osqueryversion
    #   osquerybinpath
    grains = {}
    version_flag = '--version'
    # Prefer our /opt/osquery/osqueryi if present
    candidates = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
    for candidate in candidates:
        if not salt.utils.path.which(candidate):
            # Not on this box; try the next known location.
            continue
        output = __salt__['cmd.run']('{0} {1}'.format(candidate, version_flag))
        # Take the first whitespace-separated token that starts with a
        # digit as the version string.
        for token in output.split():
            if token[:1].isdigit():
                grains['osqueryversion'] = token
                grains['osquerybinpath'] = salt.utils.path.which(candidate)
                break
        # Stop after the first resolvable binary, whether or not a
        # version token was found (matches the original control flow).
        break
    return grains
| Use of salt.utils.which detected. This function has been moved to salt.utils.path.which as of Salt 2018.3.0 | DeprecationWarning: Use of salt.utils.which detected. This function has been moved to salt.utils.path.which as of Salt 2018.3.0
| Python | apache-2.0 | hubblestack/hubble-salt | # -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.which(path)
break
break
return grains
DeprecationWarning: Use of salt.utils.which detected. This function has been moved to salt.utils.path.which as of Salt 2018.3.0 | # -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.path.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.path.which(path)
break
break
return grains
| <commit_before># -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.which(path)
break
break
return grains
<commit_msg>DeprecationWarning: Use of salt.utils.which detected. This function has been moved to salt.utils.path.which as of Salt 2018.3.0<commit_after> | # -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.path.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.path.which(path)
break
break
return grains
| # -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.which(path)
break
break
return grains
DeprecationWarning: Use of salt.utils.which detected. This function has been moved to salt.utils.path.which as of Salt 2018.3.0# -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.path.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.path.which(path)
break
break
return grains
| <commit_before># -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.which(path)
break
break
return grains
<commit_msg>DeprecationWarning: Use of salt.utils.which detected. This function has been moved to salt.utils.path.which as of Salt 2018.3.0<commit_after># -*- coding: utf-8 -*-
import salt.utils
import salt.modules.cmdmod
__salt__ = {'cmd.run': salt.modules.cmdmod._run_quiet}
def osquerygrain():
'''
Return osquery version in grain
'''
# Provides:
# osqueryversion
# osquerybinpath
grains = {}
option = '--version'
# Prefer our /opt/osquery/osqueryi if present
osqueryipaths = ('/opt/osquery/osqueryi', 'osqueryi', '/usr/bin/osqueryi')
for path in osqueryipaths:
if salt.utils.path.which(path):
for item in __salt__['cmd.run']('{0} {1}'.format(path, option)).split():
if item[:1].isdigit():
grains['osqueryversion'] = item
grains['osquerybinpath'] = salt.utils.path.which(path)
break
break
return grains
|
eed33b6ba9a3d5cf5d841d451ad03fd2f57c43bf | openfisca_senegal/senegal_taxbenefitsystem.py | openfisca_senegal/senegal_taxbenefitsystem.py | # -*- coding: utf-8 -*-
import os
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.Scenario | # -*- coding: utf-8 -*-
import os
import xml.etree.ElementTree
from openfisca_core import conv, legislationsxml
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
    """Senegalese tax and benefit system"""

    CURRENCY = u"FCFA"

    def __init__(self):
        super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
        self.Scenario = scenarios.Scenario

    def add_legislation_params(self, xml_string):
        """Parse an XML legislation string and store the converted result
        on ``self._legislation_json``."""
        def parse_xml_string(value, state=None):
            # conv converters return a (value, error) pair; no error here.
            return xml.etree.ElementTree.fromstring(value.encode('utf-8')), None

        pipeline = conv.pipe(
            parse_xml_string,
            legislationsxml.xml_legislation_to_json,
            legislationsxml.validate_legislation_xml_json,
            conv.function(
                lambda value: legislationsxml.transform_node_xml_json_to_json(value)[1]),
        )
        self._legislation_json = conv.check(pipeline)(xml_string)
| Python | agpl-3.0 | openfisca/senegal | # -*- coding: utf-8 -*-
import os
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.ScenarioImplement adding legislation XML from string | # -*- coding: utf-8 -*-
import os
import xml.etree.ElementTree
from openfisca_core import conv, legislationsxml
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.Scenario
def add_legislation_params(self, xml_string):
def input_to_xml_element(value, state=None):
return xml.etree.ElementTree.fromstring(value.encode('utf-8')), None
self._legislation_json = conv.check(conv.pipe(
input_to_xml_element,
legislationsxml.xml_legislation_to_json,
legislationsxml.validate_legislation_xml_json,
conv.function(lambda value: legislationsxml.transform_node_xml_json_to_json(value)[1]),
))(xml_string) | <commit_before># -*- coding: utf-8 -*-
import os
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.Scenario<commit_msg>Implement adding legislation XML from string<commit_after> | # -*- coding: utf-8 -*-
import os
import xml.etree.ElementTree
from openfisca_core import conv, legislationsxml
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.Scenario
def add_legislation_params(self, xml_string):
def input_to_xml_element(value, state=None):
return xml.etree.ElementTree.fromstring(value.encode('utf-8')), None
self._legislation_json = conv.check(conv.pipe(
input_to_xml_element,
legislationsxml.xml_legislation_to_json,
legislationsxml.validate_legislation_xml_json,
conv.function(lambda value: legislationsxml.transform_node_xml_json_to_json(value)[1]),
))(xml_string) | # -*- coding: utf-8 -*-
import os
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.ScenarioImplement adding legislation XML from string# -*- coding: utf-8 -*-
import os
import xml.etree.ElementTree
from openfisca_core import conv, legislationsxml
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.Scenario
def add_legislation_params(self, xml_string):
def input_to_xml_element(value, state=None):
return xml.etree.ElementTree.fromstring(value.encode('utf-8')), None
self._legislation_json = conv.check(conv.pipe(
input_to_xml_element,
legislationsxml.xml_legislation_to_json,
legislationsxml.validate_legislation_xml_json,
conv.function(lambda value: legislationsxml.transform_node_xml_json_to_json(value)[1]),
))(xml_string) | <commit_before># -*- coding: utf-8 -*-
import os
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.Scenario<commit_msg>Implement adding legislation XML from string<commit_after># -*- coding: utf-8 -*-
import os
import xml.etree.ElementTree
from openfisca_core import conv, legislationsxml
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities, scenarios
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
class SenegalTaxBenefitSystem(TaxBenefitSystem):
"""Senegalese tax and benefit system"""
CURRENCY = u"FCFA"
def __init__(self):
super(SenegalTaxBenefitSystem, self).__init__(entities.entities)
self.Scenario = scenarios.Scenario
def add_legislation_params(self, xml_string):
def input_to_xml_element(value, state=None):
return xml.etree.ElementTree.fromstring(value.encode('utf-8')), None
self._legislation_json = conv.check(conv.pipe(
input_to_xml_element,
legislationsxml.xml_legislation_to_json,
legislationsxml.validate_legislation_xml_json,
conv.function(lambda value: legislationsxml.transform_node_xml_json_to_json(value)[1]),
))(xml_string) |
448d4697803ad6993d81ca556a63977f1c039a7f | requestsexceptions/__init__.py | requestsexceptions/__init__.py | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
| # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings

# Locate urllib3's warning classes.  Prefer the copy vendored inside
# requests (requests.packages.urllib3), fall back to a standalone urllib3
# install, and finally record None when neither is importable so the
# function below can test each name before using it.
try:
    from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
    try:
        from urllib3.exceptions import InsecurePlatformWarning
    except ImportError:
        InsecurePlatformWarning = None
try:
    from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
    try:
        from urllib3.exceptions import InsecureRequestWarning
    except ImportError:
        InsecureRequestWarning = None
try:
    from requests.packages.urllib3.exceptions import SubjectAltNameWarning
except ImportError:
    try:
        from urllib3.exceptions import SubjectAltNameWarning
    except ImportError:
        SubjectAltNameWarning = None


def squelch_warnings(insecure_requests=True):
    """Install warning filters that silence urllib3 certificate warnings.

    :param insecure_requests: when True (the default), also silence
        InsecureRequestWarning.
    """
    if SubjectAltNameWarning:
        warnings.filterwarnings('ignore', category=SubjectAltNameWarning)
    if InsecurePlatformWarning:
        warnings.filterwarnings('ignore', category=InsecurePlatformWarning)
    # Fixed: the original line ended "InsecureRequestWarning):" -- an
    # unbalanced parenthesis that made the whole module a SyntaxError.
    if insecure_requests and InsecureRequestWarning:
        warnings.filterwarnings('ignore', category=InsecureRequestWarning)
| Add SubjectAltNameWarning and a helper function | Add SubjectAltNameWarning and a helper function
shade hits the SubjectAltName problem when talking to Rackspace. Also,
one of the things you want to do is turn off the warnings - so add a
function for that.
Change-Id: I3a55c66e5a4033a47a9d8704dd30709a5c53edc9
| Python | apache-2.0 | openstack-infra/requestsexceptions | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
Add SubjectAltNameWarning and a helper function
shade hits the SubjectAltName problem when talking to Rackspace. Also,
one of the things you want to do is turn off the warnings - so add a
function for that.
Change-Id: I3a55c66e5a4033a47a9d8704dd30709a5c53edc9 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
try:
from requests.packages.urllib3.exceptions import SubjectAltNameWarning
except ImportError:
try:
from urllib3.exceptions import SubjectAltNameWarning
except ImportError:
SubjectAltNameWarning = None
def squelch_warnings(insecure_requests=True):
    """Install warning filters that silence urllib3 certificate warnings.

    :param insecure_requests: when True (the default), also silence
        InsecureRequestWarning.
    """
    if SubjectAltNameWarning:
        warnings.filterwarnings('ignore', category=SubjectAltNameWarning)
    if InsecurePlatformWarning:
        warnings.filterwarnings('ignore', category=InsecurePlatformWarning)
    # Fixed: the original line ended "InsecureRequestWarning):" -- an
    # unbalanced parenthesis that made the whole module a SyntaxError.
    if insecure_requests and InsecureRequestWarning:
        warnings.filterwarnings('ignore', category=InsecureRequestWarning)
| <commit_before># Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
<commit_msg>Add SubjectAltNameWarning and a helper function
shade hits the SubjectAltName problem when talking to Rackspace. Also,
one of the things you want to do is turn off the warnings - so add a
function for that.
Change-Id: I3a55c66e5a4033a47a9d8704dd30709a5c53edc9<commit_after> | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
try:
from requests.packages.urllib3.exceptions import SubjectAltNameWarning
except ImportError:
try:
from urllib3.exceptions import SubjectAltNameWarning
except ImportError:
SubjectAltNameWarning = None
def squelch_warnings(insecure_requests=True):
    """Install warning filters that silence urllib3 certificate warnings.

    :param insecure_requests: when True (the default), also silence
        InsecureRequestWarning.
    """
    if SubjectAltNameWarning:
        warnings.filterwarnings('ignore', category=SubjectAltNameWarning)
    if InsecurePlatformWarning:
        warnings.filterwarnings('ignore', category=InsecurePlatformWarning)
    # Fixed: the original line ended "InsecureRequestWarning):" -- an
    # unbalanced parenthesis that made the whole module a SyntaxError.
    if insecure_requests and InsecureRequestWarning:
        warnings.filterwarnings('ignore', category=InsecureRequestWarning)
| # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
Add SubjectAltNameWarning and a helper function
shade hits the SubjectAltName problem when talking to Rackspace. Also,
one of the things you want to do is turn off the warnings - so add a
function for that.
Change-Id: I3a55c66e5a4033a47a9d8704dd30709a5c53edc9# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
try:
from requests.packages.urllib3.exceptions import SubjectAltNameWarning
except ImportError:
try:
from urllib3.exceptions import SubjectAltNameWarning
except ImportError:
SubjectAltNameWarning = None
def squelch_warnings(insecure_requests=True):
    """Silence urllib3 security warnings raised through the warnings module.

    SubjectAltName and InsecurePlatform warnings are always suppressed
    (when the corresponding class could be imported above).  The
    InsecureRequestWarning is only suppressed when *insecure_requests*
    is true, so callers can keep being warned about unverified HTTPS.
    """
    if SubjectAltNameWarning:
        warnings.filterwarnings('ignore', category=SubjectAltNameWarning)
    if InsecurePlatformWarning:
        warnings.filterwarnings('ignore', category=InsecurePlatformWarning)
    # Bug fix: the original line carried a stray ')' (SyntaxError); the
    # guard is a plain boolean test like the two above.
    if insecure_requests and InsecureRequestWarning:
        warnings.filterwarnings('ignore', category=InsecureRequestWarning)
| <commit_before># Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
<commit_msg>Add SubjectAltNameWarning and a helper function
shade hits the SubjectAltName problem when talking to Rackspace. Also,
one of the things you want to do is turn off the warnings - so add a
function for that.
Change-Id: I3a55c66e5a4033a47a9d8704dd30709a5c53edc9<commit_after># Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
try:
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
except ImportError:
try:
from urllib3.exceptions import InsecurePlatformWarning
except ImportError:
InsecurePlatformWarning = None
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
from urllib3.exceptions import InsecureRequestWarning
except ImportError:
InsecureRequestWarning = None
try:
from requests.packages.urllib3.exceptions import SubjectAltNameWarning
except ImportError:
try:
from urllib3.exceptions import SubjectAltNameWarning
except ImportError:
SubjectAltNameWarning = None
def squelch_warnings(insecure_requests=True):
    """Silence urllib3 security warnings raised through the warnings module.

    SubjectAltName and InsecurePlatform warnings are always suppressed
    (when the corresponding class could be imported above).  The
    InsecureRequestWarning is only suppressed when *insecure_requests*
    is true, so callers can keep being warned about unverified HTTPS.
    """
    if SubjectAltNameWarning:
        warnings.filterwarnings('ignore', category=SubjectAltNameWarning)
    if InsecurePlatformWarning:
        warnings.filterwarnings('ignore', category=InsecurePlatformWarning)
    # Bug fix: the original line carried a stray ')' (SyntaxError); the
    # guard is a plain boolean test like the two above.
    if insecure_requests and InsecureRequestWarning:
        warnings.filterwarnings('ignore', category=InsecureRequestWarning)
|
54db30c6db4df1f8a0e9030258cfcab4937d0c13 | python/castle-on-the-grid.py | python/castle-on-the-grid.py | #!/bin/python3
import math
import os
import random
import re
import sys
class CastleOnGrid:
    # Scaffold for the HackerRank 'Castle on the Grid' puzzle: holds the
    # board and endpoints; the search itself is not implemented yet.
    def __init__(self, grid, start, goal):
        self.grid = grid    # square character matrix; 'X' marks a blocked cell
        self.start = start  # (row, col) starting coordinates
        self.goal = goal    # (row, col) target coordinates
    def min_moves(self):
        # Placeholder -- implicitly returns None until BFS is written.
        pass
if __name__ == '__main__':
    # Read the puzzle from stdin: the grid size, then one line per grid
    # row, then "start_row start_col goal_row goal_col" on one line.
    grid_size = int(input())
    grid = []
    for _ in range(grid_size):
        grid.append(list(input()))
    coords = list(map(int, input().split()))
    start = (coords[0], coords[1])
    goal = (coords[2], coords[3])
    castle_on_grid = CastleOnGrid(grid, start, goal)
    min_moves = castle_on_grid.min_moves()
    print(min_moves)
| #!/bin/python3
import math
import os
import random
import re
import sys
from collections import deque
class CastleOnGrid:
    """BFS solver for the HackerRank 'Castle on the Grid' puzzle.

    The board is a square character matrix where 'X' marks a blocked
    cell.  One move slides the piece any number of cells along a row or
    column, stopping at blockers and board edges.  The goal is assumed
    reachable from the start (guaranteed by the puzzle input).
    """
    def __init__(self, grid, grid_size, start, goal):
        self.grid = grid            # list of rows, each a list of characters
        self.grid_size = grid_size  # side length of the square board
        self.start = start          # (row, col) starting cell
        self.goal = goal            # (row, col) target cell

    def min_moves(self):
        """Return the minimum number of moves from start to goal.

        Plain breadth-first search over cells: the first time the goal
        is dequeued, its depth is the answer.  `visited` is a set -- the
        original used a list, making every membership test O(n) and the
        whole search quadratic, which is what timed out on large grids.
        """
        to_visit = deque()
        visited = {self.start}
        to_visit.append((self.start, 0))
        current_coords = None
        current_depth = 0
        while current_coords != self.goal:
            current_coords, current_depth = to_visit.popleft()
            for move in self.possible_moves(current_coords):
                if move not in visited:
                    visited.add(move)
                    to_visit.append((move, current_depth + 1))
        return current_depth

    def possible_moves(self, coords):
        """Return every cell reachable from `coords` in a single move."""
        possible = []
        row, col = coords
        # Slide in each of the four directions, stopping at the first 'X'.
        for row_i in range(row + 1, self.grid_size):        # down
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for row_i in range(row - 1, -1, -1):                # up
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for col_i in range(col + 1, self.grid_size):        # right
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        for col_i in range(col - 1, -1, -1):                # left
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        return possible
if __name__ == '__main__':
    # Read the puzzle from stdin: the grid size, then one line per grid
    # row, then "start_row start_col goal_row goal_col" on one line.
    grid_size = int(input())
    grid = []
    for _ in range(grid_size):
        grid.append(list(input()))
    coords = list(map(int, input().split()))
    start = (coords[0], coords[1])
    goal = (coords[2], coords[3])
    castle_on_grid = CastleOnGrid(grid, grid_size, start, goal)
    # print(castle_on_grid.possible_moves((1, 1)))
    min_moves = castle_on_grid.min_moves()
    print(min_moves)
| Solve castle on grid (timeout test case 11) | Solve castle on grid (timeout test case 11)
| Python | mit | rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank | #!/bin/python3
import math
import os
import random
import re
import sys
class CastleOnGrid:
    # Scaffold for the HackerRank 'Castle on the Grid' puzzle: holds the
    # board and endpoints; the search itself is not implemented yet.
    def __init__(self, grid, start, goal):
        self.grid = grid    # square character matrix; 'X' marks a blocked cell
        self.start = start  # (row, col) starting coordinates
        self.goal = goal    # (row, col) target coordinates
    def min_moves(self):
        # Placeholder -- implicitly returns None until BFS is written.
        pass
if __name__ == '__main__':
grid_size = int(input())
grid = []
for _ in range(grid_size):
grid.append(list(input()))
coords = list(map(int, input().split()))
start = (coords[0], coords[1])
goal = (coords[2], coords[3])
castle_on_grid = CastleOnGrid(grid, start, goal)
min_moves = castle_on_grid.min_moves()
print(min_moves)
Solve castle on grid (timeout test case 11) | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import deque
class CastleOnGrid:
    """BFS solver for the HackerRank 'Castle on the Grid' puzzle.

    The board is a square character matrix where 'X' marks a blocked
    cell.  One move slides the piece any number of cells along a row or
    column, stopping at blockers and board edges.  The goal is assumed
    reachable from the start (guaranteed by the puzzle input).
    """
    def __init__(self, grid, grid_size, start, goal):
        self.grid = grid            # list of rows, each a list of characters
        self.grid_size = grid_size  # side length of the square board
        self.start = start          # (row, col) starting cell
        self.goal = goal            # (row, col) target cell

    def min_moves(self):
        """Return the minimum number of moves from start to goal.

        Plain breadth-first search over cells: the first time the goal
        is dequeued, its depth is the answer.  `visited` is a set -- the
        original used a list, making every membership test O(n) and the
        whole search quadratic, which is what timed out on large grids.
        """
        to_visit = deque()
        visited = {self.start}
        to_visit.append((self.start, 0))
        current_coords = None
        current_depth = 0
        while current_coords != self.goal:
            current_coords, current_depth = to_visit.popleft()
            for move in self.possible_moves(current_coords):
                if move not in visited:
                    visited.add(move)
                    to_visit.append((move, current_depth + 1))
        return current_depth

    def possible_moves(self, coords):
        """Return every cell reachable from `coords` in a single move."""
        possible = []
        row, col = coords
        # Slide in each of the four directions, stopping at the first 'X'.
        for row_i in range(row + 1, self.grid_size):        # down
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for row_i in range(row - 1, -1, -1):                # up
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for col_i in range(col + 1, self.grid_size):        # right
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        for col_i in range(col - 1, -1, -1):                # left
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        return possible
if __name__ == '__main__':
grid_size = int(input())
grid = []
for _ in range(grid_size):
grid.append(list(input()))
coords = list(map(int, input().split()))
start = (coords[0], coords[1])
goal = (coords[2], coords[3])
castle_on_grid = CastleOnGrid(grid, grid_size, start, goal)
# print(castle_on_grid.possible_moves((1, 1)))
min_moves = castle_on_grid.min_moves()
print(min_moves)
| <commit_before>#!/bin/python3
import math
import os
import random
import re
import sys
class CastleOnGrid:
    # Scaffold for the HackerRank 'Castle on the Grid' puzzle: holds the
    # board and endpoints; the search itself is not implemented yet.
    def __init__(self, grid, start, goal):
        self.grid = grid    # square character matrix; 'X' marks a blocked cell
        self.start = start  # (row, col) starting coordinates
        self.goal = goal    # (row, col) target coordinates
    def min_moves(self):
        # Placeholder -- implicitly returns None until BFS is written.
        pass
if __name__ == '__main__':
grid_size = int(input())
grid = []
for _ in range(grid_size):
grid.append(list(input()))
coords = list(map(int, input().split()))
start = (coords[0], coords[1])
goal = (coords[2], coords[3])
castle_on_grid = CastleOnGrid(grid, start, goal)
min_moves = castle_on_grid.min_moves()
print(min_moves)
<commit_msg>Solve castle on grid (timeout test case 11)<commit_after> | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import deque
class CastleOnGrid:
    """BFS solver for the HackerRank 'Castle on the Grid' puzzle.

    The board is a square character matrix where 'X' marks a blocked
    cell.  One move slides the piece any number of cells along a row or
    column, stopping at blockers and board edges.  The goal is assumed
    reachable from the start (guaranteed by the puzzle input).
    """
    def __init__(self, grid, grid_size, start, goal):
        self.grid = grid            # list of rows, each a list of characters
        self.grid_size = grid_size  # side length of the square board
        self.start = start          # (row, col) starting cell
        self.goal = goal            # (row, col) target cell

    def min_moves(self):
        """Return the minimum number of moves from start to goal.

        Plain breadth-first search over cells: the first time the goal
        is dequeued, its depth is the answer.  `visited` is a set -- the
        original used a list, making every membership test O(n) and the
        whole search quadratic, which is what timed out on large grids.
        """
        to_visit = deque()
        visited = {self.start}
        to_visit.append((self.start, 0))
        current_coords = None
        current_depth = 0
        while current_coords != self.goal:
            current_coords, current_depth = to_visit.popleft()
            for move in self.possible_moves(current_coords):
                if move not in visited:
                    visited.add(move)
                    to_visit.append((move, current_depth + 1))
        return current_depth

    def possible_moves(self, coords):
        """Return every cell reachable from `coords` in a single move."""
        possible = []
        row, col = coords
        # Slide in each of the four directions, stopping at the first 'X'.
        for row_i in range(row + 1, self.grid_size):        # down
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for row_i in range(row - 1, -1, -1):                # up
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for col_i in range(col + 1, self.grid_size):        # right
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        for col_i in range(col - 1, -1, -1):                # left
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        return possible
if __name__ == '__main__':
grid_size = int(input())
grid = []
for _ in range(grid_size):
grid.append(list(input()))
coords = list(map(int, input().split()))
start = (coords[0], coords[1])
goal = (coords[2], coords[3])
castle_on_grid = CastleOnGrid(grid, grid_size, start, goal)
# print(castle_on_grid.possible_moves((1, 1)))
min_moves = castle_on_grid.min_moves()
print(min_moves)
| #!/bin/python3
import math
import os
import random
import re
import sys
class CastleOnGrid:
    # Scaffold for the HackerRank 'Castle on the Grid' puzzle: holds the
    # board and endpoints; the search itself is not implemented yet.
    def __init__(self, grid, start, goal):
        self.grid = grid    # square character matrix; 'X' marks a blocked cell
        self.start = start  # (row, col) starting coordinates
        self.goal = goal    # (row, col) target coordinates
    def min_moves(self):
        # Placeholder -- implicitly returns None until BFS is written.
        pass
if __name__ == '__main__':
grid_size = int(input())
grid = []
for _ in range(grid_size):
grid.append(list(input()))
coords = list(map(int, input().split()))
start = (coords[0], coords[1])
goal = (coords[2], coords[3])
castle_on_grid = CastleOnGrid(grid, start, goal)
min_moves = castle_on_grid.min_moves()
print(min_moves)
Solve castle on grid (timeout test case 11)#!/bin/python3
import math
import os
import random
import re
import sys
from collections import deque
class CastleOnGrid:
    """BFS solver for the HackerRank 'Castle on the Grid' puzzle.

    The board is a square character matrix where 'X' marks a blocked
    cell.  One move slides the piece any number of cells along a row or
    column, stopping at blockers and board edges.  The goal is assumed
    reachable from the start (guaranteed by the puzzle input).
    """
    def __init__(self, grid, grid_size, start, goal):
        self.grid = grid            # list of rows, each a list of characters
        self.grid_size = grid_size  # side length of the square board
        self.start = start          # (row, col) starting cell
        self.goal = goal            # (row, col) target cell

    def min_moves(self):
        """Return the minimum number of moves from start to goal.

        Plain breadth-first search over cells: the first time the goal
        is dequeued, its depth is the answer.  `visited` is a set -- the
        original used a list, making every membership test O(n) and the
        whole search quadratic, which is what timed out on large grids.
        """
        to_visit = deque()
        visited = {self.start}
        to_visit.append((self.start, 0))
        current_coords = None
        current_depth = 0
        while current_coords != self.goal:
            current_coords, current_depth = to_visit.popleft()
            for move in self.possible_moves(current_coords):
                if move not in visited:
                    visited.add(move)
                    to_visit.append((move, current_depth + 1))
        return current_depth

    def possible_moves(self, coords):
        """Return every cell reachable from `coords` in a single move."""
        possible = []
        row, col = coords
        # Slide in each of the four directions, stopping at the first 'X'.
        for row_i in range(row + 1, self.grid_size):        # down
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for row_i in range(row - 1, -1, -1):                # up
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for col_i in range(col + 1, self.grid_size):        # right
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        for col_i in range(col - 1, -1, -1):                # left
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        return possible
if __name__ == '__main__':
grid_size = int(input())
grid = []
for _ in range(grid_size):
grid.append(list(input()))
coords = list(map(int, input().split()))
start = (coords[0], coords[1])
goal = (coords[2], coords[3])
castle_on_grid = CastleOnGrid(grid, grid_size, start, goal)
# print(castle_on_grid.possible_moves((1, 1)))
min_moves = castle_on_grid.min_moves()
print(min_moves)
| <commit_before>#!/bin/python3
import math
import os
import random
import re
import sys
class CastleOnGrid:
    # Scaffold for the HackerRank 'Castle on the Grid' puzzle: holds the
    # board and endpoints; the search itself is not implemented yet.
    def __init__(self, grid, start, goal):
        self.grid = grid    # square character matrix; 'X' marks a blocked cell
        self.start = start  # (row, col) starting coordinates
        self.goal = goal    # (row, col) target coordinates
    def min_moves(self):
        # Placeholder -- implicitly returns None until BFS is written.
        pass
if __name__ == '__main__':
grid_size = int(input())
grid = []
for _ in range(grid_size):
grid.append(list(input()))
coords = list(map(int, input().split()))
start = (coords[0], coords[1])
goal = (coords[2], coords[3])
castle_on_grid = CastleOnGrid(grid, start, goal)
min_moves = castle_on_grid.min_moves()
print(min_moves)
<commit_msg>Solve castle on grid (timeout test case 11)<commit_after>#!/bin/python3
import math
import os
import random
import re
import sys
from collections import deque
class CastleOnGrid:
    """BFS solver for the HackerRank 'Castle on the Grid' puzzle.

    The board is a square character matrix where 'X' marks a blocked
    cell.  One move slides the piece any number of cells along a row or
    column, stopping at blockers and board edges.  The goal is assumed
    reachable from the start (guaranteed by the puzzle input).
    """
    def __init__(self, grid, grid_size, start, goal):
        self.grid = grid            # list of rows, each a list of characters
        self.grid_size = grid_size  # side length of the square board
        self.start = start          # (row, col) starting cell
        self.goal = goal            # (row, col) target cell

    def min_moves(self):
        """Return the minimum number of moves from start to goal.

        Plain breadth-first search over cells: the first time the goal
        is dequeued, its depth is the answer.  `visited` is a set -- the
        original used a list, making every membership test O(n) and the
        whole search quadratic, which is what timed out on large grids.
        """
        to_visit = deque()
        visited = {self.start}
        to_visit.append((self.start, 0))
        current_coords = None
        current_depth = 0
        while current_coords != self.goal:
            current_coords, current_depth = to_visit.popleft()
            for move in self.possible_moves(current_coords):
                if move not in visited:
                    visited.add(move)
                    to_visit.append((move, current_depth + 1))
        return current_depth

    def possible_moves(self, coords):
        """Return every cell reachable from `coords` in a single move."""
        possible = []
        row, col = coords
        # Slide in each of the four directions, stopping at the first 'X'.
        for row_i in range(row + 1, self.grid_size):        # down
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for row_i in range(row - 1, -1, -1):                # up
            if self.grid[row_i][col] == 'X':
                break
            possible.append((row_i, col))
        for col_i in range(col + 1, self.grid_size):        # right
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        for col_i in range(col - 1, -1, -1):                # left
            if self.grid[row][col_i] == 'X':
                break
            possible.append((row, col_i))
        return possible
if __name__ == '__main__':
grid_size = int(input())
grid = []
for _ in range(grid_size):
grid.append(list(input()))
coords = list(map(int, input().split()))
start = (coords[0], coords[1])
goal = (coords[2], coords[3])
castle_on_grid = CastleOnGrid(grid, grid_size, start, goal)
# print(castle_on_grid.possible_moves((1, 1)))
min_moves = castle_on_grid.min_moves()
print(min_moves)
|
aacfe5de01dd11486f7f39bb414c87853c8c8857 | likert_field/templatetags/likert_star_tools.py | likert_field/templatetags/likert_star_tools.py | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import string_types
def render_stars(num, max_stars, star_set):
    """Return an HTML string of stars for a Likert item score.

    Args:
        num: the answer; None or an empty string means unanswered.  May
            be an int or a numeric string.
        max_stars: total number of stars to render.
        star_set: dict of HTML snippets under the keys 'star' (lit),
            'unlit' and 'noanswer'.

    Returns:
        star_set['noanswer'] when unanswered; otherwise num lit stars
        followed by (max_stars - num) unlit ones.  num > max_stars is
        clamped to a fully lit row.
    """
    if num is None or (isinstance(num, string_types) and len(num) == 0):
        return star_set['noanswer']
    difference = int(max_stars) - int(num)
    if difference < 0:
        num = max_stars
        difference = 0
    # String repetition/concatenation already yields the final string;
    # the former ''.join(...) wrapper was a redundant extra pass.
    return star_set['star'] * int(num) + star_set['unlit'] * difference
| #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.six import string_types
def render_stars(num, max_stars, star_set):
    """Build the HTML star representation of a Likert answer.

    ``star_set`` maps 'star', 'unlit' and 'noanswer' to HTML snippets.
    A missing answer (None or empty string) yields the 'noanswer'
    snippet; otherwise ``num`` lit stars are followed by enough unlit
    stars to reach ``max_stars``, and values above ``max_stars`` are
    clamped to a fully lit row.
    """
    unanswered = num is None or (isinstance(num, string_types) and len(num) == 0)
    if unanswered:
        return star_set['noanswer']
    unlit_count = int(max_stars) - int(num)
    if unlit_count < 0:
        num = max_stars
        unlit_count = 0
    lit_part = star_set['star'] * int(num)
    unlit_part = star_set['unlit'] * unlit_count
    return ''.join(lit_part + unlit_part)
| Use Dj provided compat tools | Use Dj provided compat tools
| Python | bsd-3-clause | kelvinwong-ca/django-likert-field,kelvinwong-ca/django-likert-field | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import string_types
def render_stars(num, max_stars, star_set):
"""
Star renderer returns a HTML string of stars
If num is None or a blank string, it returns the unanswered tag
Otherwise, the returned string will contain num solid stars
followed by max_stars - num empty stars
If num > max_stars, render max_stars solid stars
star_set is a dictionary of strings with keys: star, unlit, noanswer
"""
if num is None or (isinstance(num, string_types) and len(num) == 0):
return star_set['noanswer']
difference = int(max_stars) - int(num)
if difference < 0:
num = max_stars
difference = 0
return ''.join(
star_set['star'] * int(num) + star_set['unlit'] * difference)
Use Dj provided compat tools | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.six import string_types
def render_stars(num, max_stars, star_set):
"""
Star renderer returns a HTML string of stars
If num is None or a blank string, it returns the unanswered tag
Otherwise, the returned string will contain num solid stars
followed by max_stars - num empty stars
If num > max_stars, render max_stars solid stars
star_set is a dictionary of strings with keys: star, unlit, noanswer
"""
if num is None or (isinstance(num, string_types) and len(num) == 0):
return star_set['noanswer']
difference = int(max_stars) - int(num)
if difference < 0:
num = max_stars
difference = 0
return ''.join(
star_set['star'] * int(num) + star_set['unlit'] * difference)
| <commit_before>#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import string_types
def render_stars(num, max_stars, star_set):
"""
Star renderer returns a HTML string of stars
If num is None or a blank string, it returns the unanswered tag
Otherwise, the returned string will contain num solid stars
followed by max_stars - num empty stars
If num > max_stars, render max_stars solid stars
star_set is a dictionary of strings with keys: star, unlit, noanswer
"""
if num is None or (isinstance(num, string_types) and len(num) == 0):
return star_set['noanswer']
difference = int(max_stars) - int(num)
if difference < 0:
num = max_stars
difference = 0
return ''.join(
star_set['star'] * int(num) + star_set['unlit'] * difference)
<commit_msg>Use Dj provided compat tools<commit_after> | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.six import string_types
def render_stars(num, max_stars, star_set):
"""
Star renderer returns a HTML string of stars
If num is None or a blank string, it returns the unanswered tag
Otherwise, the returned string will contain num solid stars
followed by max_stars - num empty stars
If num > max_stars, render max_stars solid stars
star_set is a dictionary of strings with keys: star, unlit, noanswer
"""
if num is None or (isinstance(num, string_types) and len(num) == 0):
return star_set['noanswer']
difference = int(max_stars) - int(num)
if difference < 0:
num = max_stars
difference = 0
return ''.join(
star_set['star'] * int(num) + star_set['unlit'] * difference)
| #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import string_types
def render_stars(num, max_stars, star_set):
"""
Star renderer returns a HTML string of stars
If num is None or a blank string, it returns the unanswered tag
Otherwise, the returned string will contain num solid stars
followed by max_stars - num empty stars
If num > max_stars, render max_stars solid stars
star_set is a dictionary of strings with keys: star, unlit, noanswer
"""
if num is None or (isinstance(num, string_types) and len(num) == 0):
return star_set['noanswer']
difference = int(max_stars) - int(num)
if difference < 0:
num = max_stars
difference = 0
return ''.join(
star_set['star'] * int(num) + star_set['unlit'] * difference)
Use Dj provided compat tools#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.six import string_types
def render_stars(num, max_stars, star_set):
"""
Star renderer returns a HTML string of stars
If num is None or a blank string, it returns the unanswered tag
Otherwise, the returned string will contain num solid stars
followed by max_stars - num empty stars
If num > max_stars, render max_stars solid stars
star_set is a dictionary of strings with keys: star, unlit, noanswer
"""
if num is None or (isinstance(num, string_types) and len(num) == 0):
return star_set['noanswer']
difference = int(max_stars) - int(num)
if difference < 0:
num = max_stars
difference = 0
return ''.join(
star_set['star'] * int(num) + star_set['unlit'] * difference)
| <commit_before>#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import string_types
def render_stars(num, max_stars, star_set):
"""
Star renderer returns a HTML string of stars
If num is None or a blank string, it returns the unanswered tag
Otherwise, the returned string will contain num solid stars
followed by max_stars - num empty stars
If num > max_stars, render max_stars solid stars
star_set is a dictionary of strings with keys: star, unlit, noanswer
"""
if num is None or (isinstance(num, string_types) and len(num) == 0):
return star_set['noanswer']
difference = int(max_stars) - int(num)
if difference < 0:
num = max_stars
difference = 0
return ''.join(
star_set['star'] * int(num) + star_set['unlit'] * difference)
<commit_msg>Use Dj provided compat tools<commit_after>#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.six import string_types
def render_stars(num, max_stars, star_set):
"""
Star renderer returns a HTML string of stars
If num is None or a blank string, it returns the unanswered tag
Otherwise, the returned string will contain num solid stars
followed by max_stars - num empty stars
If num > max_stars, render max_stars solid stars
star_set is a dictionary of strings with keys: star, unlit, noanswer
"""
if num is None or (isinstance(num, string_types) and len(num) == 0):
return star_set['noanswer']
difference = int(max_stars) - int(num)
if difference < 0:
num = max_stars
difference = 0
return ''.join(
star_set['star'] * int(num) + star_set['unlit'] * difference)
|
5bbdfb6d38878e2e1688fe37415662ec0dc176ee | sphinxcontrib/openstreetmap.py | sphinxcontrib/openstreetmap.py | # -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
    # Custom docutils node marking where the map HTML is emitted; it
    # carries no behaviour of its own.
    pass
class OpenStreetMapDirective(Directive):
    """Directive for embedding OpenStreetMap.

    Accepts ``id``, ``label`` and ``marker`` options; at this stage the
    options are parsed but not yet attached to the generated node.
    """
    has_content = False
    option_spec = {
        'id': directives.unchanged,
        'label': directives.unchanged,
        'marker': directives.unchanged,
    }
    def run(self):
        # Produce the placeholder node; the HTML rendering happens in
        # the visit/depart functions registered in setup().
        node = openstreetmap()
        return [node]
def visit_openstreetmap_node(self, node):
    # HTML writer hook: emit the placeholder <div> for the map.  The red
    # inline style makes the unfinished rendering easy to spot.
    self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
    # Nothing to close: visit_openstreetmap_node writes a complete div.
    pass
def setup(app):
    # Sphinx extension entry point: register the custom node with its
    # HTML visitors and expose the 'openstreetmap' directive.
    app.add_node(openstreetmap,
                 html=(visit_openstreetmap_node, depart_openstreetmap_node))
    app.add_directive('openstreetmap', OpenStreetMapDirective)
| # -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
    """Directive for embedding OpenStreetMap.

    A unique ``id`` option is required so several maps can coexist on
    one page; without it the directive degrades to a build warning.
    """
    has_content = False
    option_spec = {
        'id': directives.unchanged,
        'label': directives.unchanged,
        'marker': directives.unchanged,
    }
    def run(self):
        node = openstreetmap()
        if 'id' in self.options:
            node['id'] = self.options['id']
        else:
            msg = ('openstreetmap directive needs unique id for map data')
            # Bug fix: `document` was an undefined name here (NameError
            # whenever the option was missing).  Directives report
            # through the state machine's reporter.
            return [self.state_machine.reporter.warning(msg, line=self.lineno)]
        return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
| Check whether required id parameter is specified | Check whether required id parameter is specified
| Python | bsd-2-clause | kenhys/sphinxcontrib-openstreetmap,kenhys/sphinxcontrib-openstreetmap | # -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
"""Directive for embedding OpenStreetMap"""
has_content = False
option_spec = {
'id': directives.unchanged,
'label': directives.unchanged,
'marker': directives.unchanged,
}
def run(self):
node = openstreetmap()
return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
Check whether required id parameter is specified | # -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
    """Directive for embedding OpenStreetMap.

    A unique ``id`` option is required so several maps can coexist on
    one page; without it the directive degrades to a build warning.
    """
    has_content = False
    option_spec = {
        'id': directives.unchanged,
        'label': directives.unchanged,
        'marker': directives.unchanged,
    }
    def run(self):
        node = openstreetmap()
        if 'id' in self.options:
            node['id'] = self.options['id']
        else:
            msg = ('openstreetmap directive needs unique id for map data')
            # Bug fix: `document` was an undefined name here (NameError
            # whenever the option was missing).  Directives report
            # through the state machine's reporter.
            return [self.state_machine.reporter.warning(msg, line=self.lineno)]
        return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
| <commit_before># -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
"""Directive for embedding OpenStreetMap"""
has_content = False
option_spec = {
'id': directives.unchanged,
'label': directives.unchanged,
'marker': directives.unchanged,
}
def run(self):
node = openstreetmap()
return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
<commit_msg>Check whether required id parameter is specified<commit_after> | # -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
    """Directive for embedding OpenStreetMap.

    A unique ``id`` option is required so several maps can coexist on
    one page; without it the directive degrades to a build warning.
    """
    has_content = False
    option_spec = {
        'id': directives.unchanged,
        'label': directives.unchanged,
        'marker': directives.unchanged,
    }
    def run(self):
        node = openstreetmap()
        if 'id' in self.options:
            node['id'] = self.options['id']
        else:
            msg = ('openstreetmap directive needs unique id for map data')
            # Bug fix: `document` was an undefined name here (NameError
            # whenever the option was missing).  Directives report
            # through the state machine's reporter.
            return [self.state_machine.reporter.warning(msg, line=self.lineno)]
        return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
| # -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
"""Directive for embedding OpenStreetMap"""
has_content = False
option_spec = {
'id': directives.unchanged,
'label': directives.unchanged,
'marker': directives.unchanged,
}
def run(self):
node = openstreetmap()
return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
Check whether required id parameter is specified# -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
"""Directive for embedding OpenStreetMap"""
has_content = False
option_spec = {
'id': directives.unchanged,
'label': directives.unchanged,
'marker': directives.unchanged,
}
def run(self):
node = openstreetmap()
if 'id' in self.options:
node['id'] = self.options['id']
else:
msg = ('openstreetmap directive needs uniqueue id for map data')
return [document.reporter.warning(msg, line=self.lineno)]
return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
| <commit_before># -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
"""Directive for embedding OpenStreetMap"""
has_content = False
option_spec = {
'id': directives.unchanged,
'label': directives.unchanged,
'marker': directives.unchanged,
}
def run(self):
node = openstreetmap()
return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
<commit_msg>Check whether required id parameter is specified<commit_after># -*- coding: utf-8 -*-
"""
sphinxcontrib.openstreetmap
===========================
Embed OpenStreetMap on your documentation.
:copyright: Copyright 2015 HAYASHI Kentaro <kenhys@gmail.com>
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class openstreetmap(nodes.General, nodes.Element):
pass
class OpenStreetMapDirective(Directive):
"""Directive for embedding OpenStreetMap"""
has_content = False
option_spec = {
'id': directives.unchanged,
'label': directives.unchanged,
'marker': directives.unchanged,
}
def run(self):
node = openstreetmap()
if 'id' in self.options:
node['id'] = self.options['id']
else:
msg = ('openstreetmap directive needs uniqueue id for map data')
return [document.reporter.warning(msg, line=self.lineno)]
return [node]
def visit_openstreetmap_node(self, node):
self.body.append("<div id='openstreetmap' style='color:red'>OpenStreetMap directive</div>")
def depart_openstreetmap_node(self, node):
pass
def setup(app):
app.add_node(openstreetmap,
html=(visit_openstreetmap_node, depart_openstreetmap_node))
app.add_directive('openstreetmap', OpenStreetMapDirective)
|
ac04cd5301d6aa4788fbd2d6bdaeb207f77a489e | alfred_listener/views.py | alfred_listener/views.py | from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository()
repository.name = hook_data['repo_name']
repository.user = hook_data['repo_user']
repository.url = hook_data['repo_url']
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit()
commit.repository_id = repository.id
commit.hash = hook_data['hash']
commit.ref = hook_data['ref']
commit.compare_url = hook_data['compare_url']
commit.committer_name = hook_data['committer_name']
commit.committer_email = hook_data['committer_email']
commit.message = hook_data['message']
db.session.add(commit)
db.session.commit()
return 'OK'
| from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository(
name=hook_data['repo_name'],
user=hook_data['repo_user'],
url=hook_data['repo_url']
)
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit(
repository_id=repository.id,
hash=hook_data['hash'],
ref=hook_data['ref'],
compare_url=hook_data['compare_url'],
committer_name=hook_data['committer_name'],
committer_email=hook_data['committer_email'],
message=hook_data['message']
)
db.session.add(commit)
db.session.commit()
return 'OK'
| Change the way to instaniate models | Change the way to instaniate models
| Python | isc | alfredhq/alfred-listener | from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository()
repository.name = hook_data['repo_name']
repository.user = hook_data['repo_user']
repository.url = hook_data['repo_url']
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit()
commit.repository_id = repository.id
commit.hash = hook_data['hash']
commit.ref = hook_data['ref']
commit.compare_url = hook_data['compare_url']
commit.committer_name = hook_data['committer_name']
commit.committer_email = hook_data['committer_email']
commit.message = hook_data['message']
db.session.add(commit)
db.session.commit()
return 'OK'
Change the way to instaniate models | from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository(
name=hook_data['repo_name'],
user=hook_data['repo_user'],
url=hook_data['repo_url']
)
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit(
repository_id=repository.id,
hash=hook_data['hash'],
ref=hook_data['ref'],
compare_url=hook_data['compare_url'],
committer_name=hook_data['committer_name'],
committer_email=hook_data['committer_email'],
message=hook_data['message']
)
db.session.add(commit)
db.session.commit()
return 'OK'
| <commit_before>from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository()
repository.name = hook_data['repo_name']
repository.user = hook_data['repo_user']
repository.url = hook_data['repo_url']
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit()
commit.repository_id = repository.id
commit.hash = hook_data['hash']
commit.ref = hook_data['ref']
commit.compare_url = hook_data['compare_url']
commit.committer_name = hook_data['committer_name']
commit.committer_email = hook_data['committer_email']
commit.message = hook_data['message']
db.session.add(commit)
db.session.commit()
return 'OK'
<commit_msg>Change the way to instaniate models<commit_after> | from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository(
name=hook_data['repo_name'],
user=hook_data['repo_user'],
url=hook_data['repo_url']
)
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit(
repository_id=repository.id,
hash=hook_data['hash'],
ref=hook_data['ref'],
compare_url=hook_data['compare_url'],
committer_name=hook_data['committer_name'],
committer_email=hook_data['committer_email'],
message=hook_data['message']
)
db.session.add(commit)
db.session.commit()
return 'OK'
| from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository()
repository.name = hook_data['repo_name']
repository.user = hook_data['repo_user']
repository.url = hook_data['repo_url']
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit()
commit.repository_id = repository.id
commit.hash = hook_data['hash']
commit.ref = hook_data['ref']
commit.compare_url = hook_data['compare_url']
commit.committer_name = hook_data['committer_name']
commit.committer_email = hook_data['committer_email']
commit.message = hook_data['message']
db.session.add(commit)
db.session.commit()
return 'OK'
Change the way to instaniate modelsfrom flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository(
name=hook_data['repo_name'],
user=hook_data['repo_user'],
url=hook_data['repo_url']
)
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit(
repository_id=repository.id,
hash=hook_data['hash'],
ref=hook_data['ref'],
compare_url=hook_data['compare_url'],
committer_name=hook_data['committer_name'],
committer_email=hook_data['committer_email'],
message=hook_data['message']
)
db.session.add(commit)
db.session.commit()
return 'OK'
| <commit_before>from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository()
repository.name = hook_data['repo_name']
repository.user = hook_data['repo_user']
repository.url = hook_data['repo_url']
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit()
commit.repository_id = repository.id
commit.hash = hook_data['hash']
commit.ref = hook_data['ref']
commit.compare_url = hook_data['compare_url']
commit.committer_name = hook_data['committer_name']
commit.committer_email = hook_data['committer_email']
commit.message = hook_data['message']
db.session.add(commit)
db.session.commit()
return 'OK'
<commit_msg>Change the way to instaniate models<commit_after>from flask import Blueprint, request, json
from alfred_db.models import Repository, Commit
from .database import db
from .helpers import parse_hook_data
webhooks = Blueprint('webhooks', __name__)
@webhooks.route('/', methods=['POST'])
def handler():
payload = request.form.get('payload', '')
try:
payload_data = json.loads(payload)
except ValueError:
return 'Bad request', 400
hook_data = parse_hook_data(payload_data)
repository = db.session.query(Repository).filter_by(
name=hook_data['repo_name'], user=hook_data['repo_user']
).first()
if repository is None:
repository = Repository(
name=hook_data['repo_name'],
user=hook_data['repo_user'],
url=hook_data['repo_url']
)
db.session.add(repository)
db.session.commit()
commit = db.session.query(Commit).filter_by(
hash=hook_data['hash'], repository_id=repository.id
).first()
if commit is None:
commit = Commit(
repository_id=repository.id,
hash=hook_data['hash'],
ref=hook_data['ref'],
compare_url=hook_data['compare_url'],
committer_name=hook_data['committer_name'],
committer_email=hook_data['committer_email'],
message=hook_data['message']
)
db.session.add(commit)
db.session.commit()
return 'OK'
|
bc4d11a6b5584975c3e9f16c2a4e25b26ff3da11 | mooc_aggregator_restful_api/mooc_aggregator.py | mooc_aggregator_restful_api/mooc_aggregator.py | '''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
connect('moocs')
if __name__ == '__main__':
pass
| '''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
from models import Mooc
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
#connect('moocs')
def update_database(self):
udacity_courses = self.udacity.mongofy_courses()
course = udacity_courses[0]
mooc = Mooc(course['mooc'], course['title'])
return len(udacity_courses)
if __name__ == '__main__':
mooc = MOOCAggregator()
print mooc.update_database()
| Add functionality to store courses in database | Add functionality to store courses in database
| Python | mit | ueg1990/mooc_aggregator_restful_api | '''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
connect('moocs')
if __name__ == '__main__':
pass
Add functionality to store courses in database | '''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
from models import Mooc
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
#connect('moocs')
def update_database(self):
udacity_courses = self.udacity.mongofy_courses()
course = udacity_courses[0]
mooc = Mooc(course['mooc'], course['title'])
return len(udacity_courses)
if __name__ == '__main__':
mooc = MOOCAggregator()
print mooc.update_database()
| <commit_before>'''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
connect('moocs')
if __name__ == '__main__':
pass
<commit_msg>Add functionality to store courses in database<commit_after> | '''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
from models import Mooc
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
#connect('moocs')
def update_database(self):
udacity_courses = self.udacity.mongofy_courses()
course = udacity_courses[0]
mooc = Mooc(course['mooc'], course['title'])
return len(udacity_courses)
if __name__ == '__main__':
mooc = MOOCAggregator()
print mooc.update_database()
| '''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
connect('moocs')
if __name__ == '__main__':
pass
Add functionality to store courses in database'''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
from models import Mooc
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
#connect('moocs')
def update_database(self):
udacity_courses = self.udacity.mongofy_courses()
course = udacity_courses[0]
mooc = Mooc(course['mooc'], course['title'])
return len(udacity_courses)
if __name__ == '__main__':
mooc = MOOCAggregator()
print mooc.update_database()
| <commit_before>'''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
connect('moocs')
if __name__ == '__main__':
pass
<commit_msg>Add functionality to store courses in database<commit_after>'''
This module aggregates all the course information from different MOOC platforms
and stores them in the database (MongoDB)
'''
from mongoengine import connect
from flask.ext.mongoengine import MongoEngine
from udacity import UdacityAPI
from coursera import CourseraAPI
from models import Mooc
class MOOCAggregator(object):
'''
This class defines the attributes and methods for the MOOC aggregator
'''
MOOC_PLATFORMS = {'udacity', 'coursera'}
def __init__(self):
self.udacity = UdacityAPI()
self.coursera = CourseraAPI()
#connect('moocs')
def update_database(self):
udacity_courses = self.udacity.mongofy_courses()
course = udacity_courses[0]
mooc = Mooc(course['mooc'], course['title'])
return len(udacity_courses)
if __name__ == '__main__':
mooc = MOOCAggregator()
print mooc.update_database()
|
80933bcefcb686f7fa49cf91283b1f6a619d77c3 | examples/django_demo/generic_foreignkey/apps.py | examples/django_demo/generic_foreignkey/apps.py | from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForenikeyConfig(AppConfig):
name = 'generic_foreignkey'
| from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForeignKeyConfig(AppConfig):
name = 'generic_foreignkey'
| Fix typo: GenericForenikeyConfig → GenericForeignKeyConfig | Fix typo: GenericForenikeyConfig → GenericForeignKeyConfig
| Python | mit | FactoryBoy/factory_boy | from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForenikeyConfig(AppConfig):
name = 'generic_foreignkey'
Fix typo: GenericForenikeyConfig → GenericForeignKeyConfig | from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForeignKeyConfig(AppConfig):
name = 'generic_foreignkey'
| <commit_before>from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForenikeyConfig(AppConfig):
name = 'generic_foreignkey'
<commit_msg>Fix typo: GenericForenikeyConfig → GenericForeignKeyConfig<commit_after> | from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForeignKeyConfig(AppConfig):
name = 'generic_foreignkey'
| from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForenikeyConfig(AppConfig):
name = 'generic_foreignkey'
Fix typo: GenericForenikeyConfig → GenericForeignKeyConfigfrom __future__ import unicode_literals
from django.apps import AppConfig
class GenericForeignKeyConfig(AppConfig):
name = 'generic_foreignkey'
| <commit_before>from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForenikeyConfig(AppConfig):
name = 'generic_foreignkey'
<commit_msg>Fix typo: GenericForenikeyConfig → GenericForeignKeyConfig<commit_after>from __future__ import unicode_literals
from django.apps import AppConfig
class GenericForeignKeyConfig(AppConfig):
name = 'generic_foreignkey'
|
0ca4e0898fc6ee84109b458dfd505cdf42a5bae3 | tests/shared/core/test_constants.py | tests/shared/core/test_constants.py | from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[
(POLICY_NAME_RULE, RulePolicy),
(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),
],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
| from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[(POLICY_NAME_RULE, RulePolicy), (CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
| Fix some code quality issues. | Fix some code quality issues.
| Python | apache-2.0 | RasaHQ/rasa_nlu,RasaHQ/rasa_nlu,RasaHQ/rasa_nlu | from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[
(POLICY_NAME_RULE, RulePolicy),
(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),
],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
Fix some code quality issues. | from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[(POLICY_NAME_RULE, RulePolicy), (CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
| <commit_before>from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[
(POLICY_NAME_RULE, RulePolicy),
(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),
],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
<commit_msg>Fix some code quality issues.<commit_after> | from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[(POLICY_NAME_RULE, RulePolicy), (CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
| from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[
(POLICY_NAME_RULE, RulePolicy),
(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),
],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
Fix some code quality issues.from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[(POLICY_NAME_RULE, RulePolicy), (CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
| <commit_before>from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[
(POLICY_NAME_RULE, RulePolicy),
(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),
],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
<commit_msg>Fix some code quality issues.<commit_after>from typing import Text, Type
import pytest
from rasa.core.policies.rule_policy import RulePolicy
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.shared.core.constants import (
CLASSIFIER_NAME_FALLBACK,
POLICY_NAME_RULE,
)
@pytest.mark.parametrize(
"name_in_constant, policy_class",
[(POLICY_NAME_RULE, RulePolicy), (CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_policy_names(name_in_constant: Text, policy_class: Type):
assert name_in_constant == policy_class.__name__
@pytest.mark.parametrize(
"name_in_constant, classifier_class",
[(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),],
)
def test_classifier_names(name_in_constant: Text, classifier_class: Type):
assert name_in_constant == classifier_class.__name__
|
0f2950fcb44efc9b629242743574af503e8230d4 | tip/algorithms/sorting/mergesort.py | tip/algorithms/sorting/mergesort.py | def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:m]), mergesort(list[m:]))
| def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:int(m)]), mergesort(list[int(m):]))
| Fix slices for Python 3 | Fix slices for Python 3
| Python | unlicense | davidgasquez/tip | def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:m]), mergesort(list[m:]))
Fix slices for Python 3 | def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:int(m)]), mergesort(list[int(m):]))
| <commit_before>def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:m]), mergesort(list[m:]))
<commit_msg>Fix slices for Python 3<commit_after> | def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:int(m)]), mergesort(list[int(m):]))
| def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:m]), mergesort(list[m:]))
Fix slices for Python 3def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:int(m)]), mergesort(list[int(m):]))
| <commit_before>def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:m]), mergesort(list[m:]))
<commit_msg>Fix slices for Python 3<commit_after>def merge(a, b):
if len(a) * len(b) == 0:
return a + b
v = (a[0] < b[0] and a or b).pop(0)
return [v] + merge(a, b)
def mergesort(list):
if len(list) < 2:
return list
m = len(list) / 2
return merge(mergesort(list[:int(m)]), mergesort(list[int(m):]))
|
cda7f19ac24fe9217e4f5a655de1a6085cf75b12 | api/users/serializers.py | api/users/serializers.py | from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
filterable_fields = frozenset(['bibliographic'])
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
| from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
local_filterable = frozenset(['bibliographic'])
filterable_fields = frozenset.union(UserSerializer.filterable_fields, local_filterable)
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
| Check for parent filterable fields as well as local filterable fields | Check for parent filterable fields as well as local filterable fields
| Python | apache-2.0 | Nesiehr/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,ZobairAlijan/osf.io,KAsante95/osf.io,barbour-em/osf.io,chennan47/osf.io,icereval/osf.io,chrisseto/osf.io,erinspace/osf.io,erinspace/osf.io,kwierman/osf.io,cslzchen/osf.io,KAsante95/osf.io,felliott/osf.io,cslzchen/osf.io,alexschiller/osf.io,jmcarp/osf.io,danielneis/osf.io,CenterForOpenScience/osf.io,DanielSBrown/osf.io,KAsante95/osf.io,TomBaxter/osf.io,Nesiehr/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,icereval/osf.io,icereval/osf.io,brianjgeiger/osf.io,haoyuchen1992/osf.io,njantrania/osf.io,bdyetton/prettychart,pattisdr/osf.io,Nesiehr/osf.io,kwierman/osf.io,RomanZWang/osf.io,lyndsysimon/osf.io,rdhyee/osf.io,jinluyuan/osf.io,ckc6cz/osf.io,baylee-d/osf.io,mluo613/osf.io,zachjanicki/osf.io,acshi/osf.io,saradbowman/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,hmoco/osf.io,samchrisinger/osf.io,bdyetton/prettychart,ZobairAlijan/osf.io,billyhunt/osf.io,Ghalko/osf.io,reinaH/osf.io,abought/osf.io,GageGaskins/osf.io,abought/osf.io,binoculars/osf.io,jinluyuan/osf.io,DanielSBrown/osf.io,crcresearch/osf.io,laurenrevere/osf.io,brandonPurvis/osf.io,TomBaxter/osf.io,asanfilippo7/osf.io,SSJohns/osf.io,rdhyee/osf.io,SSJohns/osf.io,dplorimer/osf,ckc6cz/osf.io,zachjanicki/osf.io,caseyrollins/osf.io,amyshi188/osf.io,cldershem/osf.io,petermalcolm/osf.io,jinluyuan/osf.io,mluo613/osf.io,jnayak1/osf.io,mfraezz/osf.io,bdyetton/prettychart,petermalcolm/osf.io,brandonPurvis/osf.io,chennan47/osf.io,sbt9uc/osf.io,felliott/osf.io,doublebits/osf.io,caseyrygt/osf.io,kch8qx/osf.io,zamattiac/osf.io,sloria/osf.io,cwisecarver/osf.io,sbt9uc/osf.io,brianjgeiger/osf.io,acshi/osf.io,Ghalko/osf.io,abought/osf.io,emetsger/osf.io,mluke93/osf.io,Johnetordoff/osf.io,RomanZWang/osf.io,chrisseto/osf.io,cosenal/osf.io,pattisdr/osf.io,adlius/osf.io,mattclark/osf.io,felliott/osf.io,amyshi188/osf.io,jeffreyliu3230/osf.io,lyndsysimon/osf.io,kch8qx/osf.io,acshi/osf.io,TomBaxter/osf.io,fabianvf/os
f.io,reinaH/osf.io,leb2dg/osf.io,cldershem/osf.io,dplorimer/osf,brandonPurvis/osf.io,zachjanicki/osf.io,GageGaskins/osf.io,jeffreyliu3230/osf.io,KAsante95/osf.io,HalcyonChimera/osf.io,samchrisinger/osf.io,zachjanicki/osf.io,RomanZWang/osf.io,binoculars/osf.io,caneruguz/osf.io,caseyrollins/osf.io,barbour-em/osf.io,jmcarp/osf.io,erinspace/osf.io,jnayak1/osf.io,caneruguz/osf.io,RomanZWang/osf.io,zamattiac/osf.io,jolene-esposito/osf.io,njantrania/osf.io,billyhunt/osf.io,asanfilippo7/osf.io,cldershem/osf.io,alexschiller/osf.io,cslzchen/osf.io,TomHeatwole/osf.io,wearpants/osf.io,billyhunt/osf.io,SSJohns/osf.io,wearpants/osf.io,ticklemepierce/osf.io,HarryRybacki/osf.io,ticklemepierce/osf.io,Ghalko/osf.io,mattclark/osf.io,aaxelb/osf.io,sloria/osf.io,dplorimer/osf,caseyrygt/osf.io,leb2dg/osf.io,ckc6cz/osf.io,samchrisinger/osf.io,samchrisinger/osf.io,cwisecarver/osf.io,GageGaskins/osf.io,DanielSBrown/osf.io,TomHeatwole/osf.io,zamattiac/osf.io,SSJohns/osf.io,cwisecarver/osf.io,hmoco/osf.io,doublebits/osf.io,abought/osf.io,zamattiac/osf.io,kch8qx/osf.io,adlius/osf.io,chrisseto/osf.io,doublebits/osf.io,leb2dg/osf.io,jolene-esposito/osf.io,emetsger/osf.io,fabianvf/osf.io,adlius/osf.io,arpitar/osf.io,leb2dg/osf.io,ZobairAlijan/osf.io,HarryRybacki/osf.io,Nesiehr/osf.io,njantrania/osf.io,doublebits/osf.io,mattclark/osf.io,asanfilippo7/osf.io,kch8qx/osf.io,laurenrevere/osf.io,jinluyuan/osf.io,amyshi188/osf.io,barbour-em/osf.io,emetsger/osf.io,crcresearch/osf.io,chrisseto/osf.io,wearpants/osf.io,alexschiller/osf.io,billyhunt/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,lyndsysimon/osf.io,monikagrabowska/osf.io,TomHeatwole/osf.io,reinaH/osf.io,caseyrygt/osf.io,mfraezz/osf.io,njantrania/osf.io,billyhunt/osf.io,crcresearch/osf.io,felliott/osf.io,HarryRybacki/osf.io,brandonPurvis/osf.io,HarryRybacki/osf.io,caseyrollins/osf.io,GageGaskins/osf.io,MerlinZhang/osf.io,CenterForOpenScience/osf.io,samanehsan/osf.io,samanehsan/osf.io,haoyuchen1992/osf.io,mluo613/osf.io,cslzchen/osf.io,Merlin
Zhang/osf.io,ticklemepierce/osf.io,TomHeatwole/osf.io,petermalcolm/osf.io,arpitar/osf.io,petermalcolm/osf.io,amyshi188/osf.io,aaxelb/osf.io,cosenal/osf.io,GageGaskins/osf.io,Ghalko/osf.io,CenterForOpenScience/osf.io,mluke93/osf.io,jolene-esposito/osf.io,caneruguz/osf.io,hmoco/osf.io,DanielSBrown/osf.io,baylee-d/osf.io,kwierman/osf.io,samanehsan/osf.io,fabianvf/osf.io,doublebits/osf.io,acshi/osf.io,ZobairAlijan/osf.io,bdyetton/prettychart,danielneis/osf.io,aaxelb/osf.io,emetsger/osf.io,kwierman/osf.io,jnayak1/osf.io,mluke93/osf.io,reinaH/osf.io,pattisdr/osf.io,jeffreyliu3230/osf.io,jnayak1/osf.io,mluo613/osf.io,arpitar/osf.io,MerlinZhang/osf.io,sloria/osf.io,cldershem/osf.io,RomanZWang/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,adlius/osf.io,HalcyonChimera/osf.io,saradbowman/osf.io,jeffreyliu3230/osf.io,barbour-em/osf.io,hmoco/osf.io,acshi/osf.io,jolene-esposito/osf.io,fabianvf/osf.io,mluke93/osf.io,wearpants/osf.io,danielneis/osf.io,dplorimer/osf,kch8qx/osf.io,CenterForOpenScience/osf.io,sbt9uc/osf.io,MerlinZhang/osf.io,lyndsysimon/osf.io,KAsante95/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,ticklemepierce/osf.io,haoyuchen1992/osf.io,HalcyonChimera/osf.io,rdhyee/osf.io,haoyuchen1992/osf.io,aaxelb/osf.io,caseyrygt/osf.io,alexschiller/osf.io,mluo613/osf.io,danielneis/osf.io,cosenal/osf.io,arpitar/osf.io,asanfilippo7/osf.io,cosenal/osf.io,samanehsan/osf.io,brandonPurvis/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,jmcarp/osf.io,monikagrabowska/osf.io,ckc6cz/osf.io,jmcarp/osf.io,binoculars/osf.io,sbt9uc/osf.io,baylee-d/osf.io,mfraezz/osf.io | from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
filterable_fields = frozenset(['bibliographic'])
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
Check for parent filterable fields as well as local filterable fields | from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
local_filterable = frozenset(['bibliographic'])
filterable_fields = frozenset.union(UserSerializer.filterable_fields, local_filterable)
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
| <commit_before>from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
filterable_fields = frozenset(['bibliographic'])
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
<commit_msg>Check for parent filterable fields as well as local filterable fields<commit_after> | from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
local_filterable = frozenset(['bibliographic'])
filterable_fields = frozenset.union(UserSerializer.filterable_fields, local_filterable)
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
| from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
filterable_fields = frozenset(['bibliographic'])
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
Check for parent filterable fields as well as local filterable fieldsfrom rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
local_filterable = frozenset(['bibliographic'])
filterable_fields = frozenset.union(UserSerializer.filterable_fields, local_filterable)
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
| <commit_before>from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
filterable_fields = frozenset(['bibliographic'])
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
<commit_msg>Check for parent filterable fields as well as local filterable fields<commit_after>from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, LinksField, Link
class UserSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'fullname',
'given_name',
'middle_name',
'family_name',
'id'
])
id = ser.CharField(read_only=True, source='_id')
fullname = ser.CharField()
given_name = ser.CharField()
middle_name = ser.CharField(source='middle_names')
family_name = ser.CharField()
suffix = ser.CharField()
date_registered = ser.DateTimeField(read_only=True)
gravatar_url = ser.CharField()
employment_institutions = ser.ListField(source='jobs')
educational_institutions = ser.ListField(source='schools')
social_accounts = ser.DictField(source='social')
links = LinksField({
'html': 'absolute_url',
'nodes': {
'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
}
})
class Meta:
type_ = 'users'
def absolute_url(self, obj):
return obj.absolute_url
def update(self, instance, validated_data):
# TODO
pass
class ContributorSerializer(UserSerializer):
local_filterable = frozenset(['bibliographic'])
filterable_fields = frozenset.union(UserSerializer.filterable_fields, local_filterable)
bibliographic = ser.SerializerMethodField()
def get_bibliographic(self, obj):
node = self.context['view'].get_node()
return obj._id in node.visible_contributor_ids
|
85f9886e9a9cfddaa9cc8d90355647f5f8db53c4 | touchdown/tests/test_aws_ec2_ami.py | touchdown/tests/test_aws_ec2_ami.py | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 80)
| # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 160)
| Update tests to match new metadata | Update tests to match new metadata
| Python | apache-2.0 | yaybu/touchdown,mitchellrj/touchdown | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 80)
Update tests to match new metadata | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 160)
| <commit_before># Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 80)
<commit_msg>Update tests to match new metadata<commit_after> | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 160)
| # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 80)
Update tests to match new metadata# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 160)
| <commit_before># Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 80)
<commit_msg>Update tests to match new metadata<commit_after># Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from touchdown.aws.session import session
class TestMetadata(unittest.TestCase):
def test_waiter_waity_enough(self):
waiter = session.get_waiter_model("ec2")
self.assertEqual(waiter.get_waiter("ImageAvailable").max_attempts, 160)
|
58ce7f451263616c19599171305965b03807a853 | thinglang/foundation/definitions.py | thinglang/foundation/definitions.py | from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6
}
| from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6,
Identifier("Time"): 7,
Identifier("Exception"): 8
}
| Add new types to INTERNAL_TYPE_ORDERING | Add new types to INTERNAL_TYPE_ORDERING
| Python | mit | ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang | from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6
}
Add new types to INTERNAL_TYPE_ORDERING | from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6,
Identifier("Time"): 7,
Identifier("Exception"): 8
}
| <commit_before>from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6
}
<commit_msg>Add new types to INTERNAL_TYPE_ORDERING<commit_after> | from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6,
Identifier("Time"): 7,
Identifier("Exception"): 8
}
| from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6
}
Add new types to INTERNAL_TYPE_ORDERINGfrom thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6,
Identifier("Time"): 7,
Identifier("Exception"): 8
}
| <commit_before>from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6
}
<commit_msg>Add new types to INTERNAL_TYPE_ORDERING<commit_after>from thinglang.lexer.values.identifier import Identifier
"""
The internal ordering of core types used by the compiler and runtime
"""
INTERNAL_TYPE_ORDERING = {
Identifier("text"): 1,
Identifier("number"): 2,
Identifier("bool"): 3,
Identifier("list"): 4,
Identifier("Console"): 5,
Identifier("File"): 6,
Identifier("Time"): 7,
Identifier("Exception"): 8
}
|
aca7c4ef6998786abfd2119fee26e1d94d501c16 | _build/drake-build.py | _build/drake-build.py | #!/usr/bin/python
import sys
sys.path.append('../src')
import drake
drake.run('..')
| #!/usr/bin/python
import os
import sys
sys.path.append('../src')
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '../src:%s' % os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = '../src'
import drake
drake.run('..')
| Add drake in the PYTHONPATH for the tests. | Add drake in the PYTHONPATH for the tests.
| Python | agpl-3.0 | mefyl/drake,mefyl/drake,mefyl/drake,infinit/drake,infinit/drake,infinit/drake | #!/usr/bin/python
import sys
sys.path.append('../src')
import drake
drake.run('..')
Add drake in the PYTHONPATH for the tests. | #!/usr/bin/python
import os
import sys
sys.path.append('../src')
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '../src:%s' % os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = '../src'
import drake
drake.run('..')
| <commit_before>#!/usr/bin/python
import sys
sys.path.append('../src')
import drake
drake.run('..')
<commit_msg>Add drake in the PYTHONPATH for the tests.<commit_after> | #!/usr/bin/python
import os
import sys
sys.path.append('../src')
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '../src:%s' % os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = '../src'
import drake
drake.run('..')
| #!/usr/bin/python
import sys
sys.path.append('../src')
import drake
drake.run('..')
Add drake in the PYTHONPATH for the tests.#!/usr/bin/python
import os
import sys
sys.path.append('../src')
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '../src:%s' % os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = '../src'
import drake
drake.run('..')
| <commit_before>#!/usr/bin/python
import sys
sys.path.append('../src')
import drake
drake.run('..')
<commit_msg>Add drake in the PYTHONPATH for the tests.<commit_after>#!/usr/bin/python
import os
import sys
sys.path.append('../src')
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '../src:%s' % os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = '../src'
import drake
drake.run('..')
|
f751b95912b5cc03c3fd60742de9159b00d92b79 | echonest/utils.py | echonest/utils.py | from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(url)
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
| from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(str(url))
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
| Fix bug in The Echo Nest API call | Fix bug in The Echo Nest API call
| Python | bsd-3-clause | FreeMusicNinja/api.freemusic.ninja | from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(url)
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
Fix bug in The Echo Nest API call | from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(str(url))
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
| <commit_before>from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(url)
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
<commit_msg>Fix bug in The Echo Nest API call<commit_after> | from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(str(url))
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
| from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(url)
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
Fix bug in The Echo Nest API callfrom django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(str(url))
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
| <commit_before>from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(url)
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
<commit_msg>Fix bug in The Echo Nest API call<commit_after>from django.conf import settings
from purl import Template
import requests
from .models import SimilarResponse
API_URL = Template("http://developer.echonest.com/api/v4/artist/similar"
"?api_key=%s&results=100&name={name}"
% settings.ECHONEST_API_KEY)
def get_similar_from_api(name):
url = API_URL.expand({'name': name})
r = requests.get(str(url))
r.raise_for_status()
return SimilarResponse.objects.create(name=name, response=r.json())
def get_similar_from_db(name):
return SimilarResponse.objects.get(normalized_name=name.upper())
def get_similar(name):
try:
response = get_similar_from_db(name)
except SimilarResponse.DoesNotExist:
response = get_similar_from_api(name)
return response.artist_names
|
a4e9c3c47ff3999b56d769208106d3a605e1b50e | tests/test_client.py | tests/test_client.py | from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
balanced.configure('XXX')
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
| from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
| Fix 401 in test suite | Fix 401 in test suite | Python | mit | balanced/balanced-python,trenton42/txbalanced | from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
balanced.configure('XXX')
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
Fix 401 in test suite | from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
| <commit_before>from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
balanced.configure('XXX')
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
<commit_msg>Fix 401 in test suite<commit_after> | from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
| from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
balanced.configure('XXX')
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
Fix 401 in test suitefrom __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
| <commit_before>from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
balanced.configure('XXX')
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
<commit_msg>Fix 401 in test suite<commit_after>from __future__ import unicode_literals
import balanced
from . import utils
class TestClient(utils.TestCase):
def setUp(self):
super(TestClient, self).setUp()
def test_configure(self):
expected_headers = {
'content-type': 'application/vnd.api+json;revision=1.1',
'accept': 'application/json;revision=1.1',
'User-Agent': u'balanced-python/1.0dev1'
}
self.assertDictContainsSubset(
expected_headers, balanced.config.client.config.headers
)
|
0c405fd30d059164509308dee604f50d9857202d | scripts/slave/recipe_modules/webrtc/resources/cleanup_files.py | scripts/slave/recipe_modules/webrtc/resources/cleanup_files.py | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print >> sys.stderr, '%s does not exist or is not a directory!' % clean_dir
return 2
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print 'Cannot find any directory at %s. Skipping cleaning.' % clean_dir
return 0
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| Make clean script return successful if clean dir is missing. | WebRTC: Make clean script return successful if clean dir is missing.
I thought it would be good to have the script fail for an
incorrectly specified directory, but the out/ dir is missing
on fresh installed slave machines, so better just skip cleaning
with a line printed to stdout instead when the dir is missing.
TESTED=Ran the script locally with an invalid dir argument.
Review URL: https://codereview.chromium.org/921633005
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@294057 0039d316-1c4b-4281-b951-d872f2087c98
| Python | bsd-3-clause | eunchong/build,eunchong/build,eunchong/build,eunchong/build | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print >> sys.stderr, '%s does not exist or is not a directory!' % clean_dir
return 2
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
WebRTC: Make clean script return successful if clean dir is missing.
I thought it would be good to have the script fail for an
incorrectly specified directory, but the out/ dir is missing
on fresh installed slave machines, so better just skip cleaning
with a line printed to stdout instead when the dir is missing.
TESTED=Ran the script locally with an invalid dir argument.
Review URL: https://codereview.chromium.org/921633005
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@294057 0039d316-1c4b-4281-b951-d872f2087c98 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print 'Cannot find any directory at %s. Skipping cleaning.' % clean_dir
return 0
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| <commit_before>#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print >> sys.stderr, '%s does not exist or is not a directory!' % clean_dir
return 2
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
<commit_msg>WebRTC: Make clean script return successful if clean dir is missing.
I thought it would be good to have the script fail for an
incorrectly specified directory, but the out/ dir is missing
on fresh installed slave machines, so better just skip cleaning
with a line printed to stdout instead when the dir is missing.
TESTED=Ran the script locally with an invalid dir argument.
Review URL: https://codereview.chromium.org/921633005
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@294057 0039d316-1c4b-4281-b951-d872f2087c98<commit_after> | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print 'Cannot find any directory at %s. Skipping cleaning.' % clean_dir
return 0
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print >> sys.stderr, '%s does not exist or is not a directory!' % clean_dir
return 2
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
WebRTC: Make clean script return successful if clean dir is missing.
I thought it would be good to have the script fail for an
incorrectly specified directory, but the out/ dir is missing
on fresh installed slave machines, so better just skip cleaning
with a line printed to stdout instead when the dir is missing.
TESTED=Ran the script locally with an invalid dir argument.
Review URL: https://codereview.chromium.org/921633005
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@294057 0039d316-1c4b-4281-b951-d872f2087c98#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print 'Cannot find any directory at %s. Skipping cleaning.' % clean_dir
return 0
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| <commit_before>#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print >> sys.stderr, '%s does not exist or is not a directory!' % clean_dir
return 2
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
<commit_msg>WebRTC: Make clean script return successful if clean dir is missing.
I thought it would be good to have the script fail for an
incorrectly specified directory, but the out/ dir is missing
on fresh installed slave machines, so better just skip cleaning
with a line printed to stdout instead when the dir is missing.
TESTED=Ran the script locally with an invalid dir argument.
Review URL: https://codereview.chromium.org/921633005
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@294057 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that deletes all files (but not directories) in a given directory."""
import os
import sys
def main(args):
if not args or len(args) != 1:
print >> sys.stderr, 'Please specify a single directory as an argument.'
return 1
clean_dir = args[0]
if not os.path.isdir(clean_dir):
print 'Cannot find any directory at %s. Skipping cleaning.' % clean_dir
return 0
for filename in os.listdir(clean_dir):
file_path = os.path.join(clean_dir, filename)
if os.path.isfile(file_path):
try:
os.remove(file_path)
print 'Removed %s' % file_path
except OSError as e:
# Don't fail if we cannot delete a file.
print e
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
a0bc95155bc3aed691793afb8fec639661cb9d5b | polyaxon/polyaxon/config_settings/events_handlers/__init__.py | polyaxon/polyaxon/config_settings/events_handlers/__init__.py | from polyaxon.config_settings.volume_claims import *
from .apps import *
| from polyaxon.config_settings.volume_claims import *
from polyaxon.config_settings.spawner import *
from .apps import *
| Add spawner to event handlers | Add spawner to event handlers
| Python | apache-2.0 | polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon | from polyaxon.config_settings.volume_claims import *
from .apps import *
Add spawner to event handlers | from polyaxon.config_settings.volume_claims import *
from polyaxon.config_settings.spawner import *
from .apps import *
| <commit_before>from polyaxon.config_settings.volume_claims import *
from .apps import *
<commit_msg>Add spawner to event handlers<commit_after> | from polyaxon.config_settings.volume_claims import *
from polyaxon.config_settings.spawner import *
from .apps import *
| from polyaxon.config_settings.volume_claims import *
from .apps import *
Add spawner to event handlersfrom polyaxon.config_settings.volume_claims import *
from polyaxon.config_settings.spawner import *
from .apps import *
| <commit_before>from polyaxon.config_settings.volume_claims import *
from .apps import *
<commit_msg>Add spawner to event handlers<commit_after>from polyaxon.config_settings.volume_claims import *
from polyaxon.config_settings.spawner import *
from .apps import *
|
00bfddc9317660f6e7464288fc070d40a1ebad6b | server/constants.py | server/constants.py | """App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
| """App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'rq',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
| Add /rq to banned course names | Add /rq to banned course names
| Python | apache-2.0 | Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok | """App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
Add /rq to banned course names | """App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'rq',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
| <commit_before>"""App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
<commit_msg>Add /rq to banned course names<commit_after> | """App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'rq',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
| """App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
Add /rq to banned course names"""App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'rq',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
| <commit_before>"""App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
<commit_msg>Add /rq to banned course names<commit_after>"""App constants"""
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
INSTRUCTOR_ROLE]
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
GRADE_TAGS = ['composition', 'correctness', 'total', 'partner a', 'partner b',
'regrade', 'revision', 'private']
API_PREFIX = '/api'
OAUTH_SCOPES = ['all', 'email']
COURSE_ENDPOINT_FORMAT = '^\w+/\w+/\w+$'
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
GRADES_BUCKET = 'ok_grades_bucket'
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
AUTOGRADER_URL = 'https://autograder.cs61a.org'
FORBIDDEN_ROUTE_NAMES = [
'about',
'admin',
'api',
'comments',
'login',
'logout',
'oauth',
'rq',
'testing-login',
]
FORBIDDEN_ASSIGNMENT_NAMES = []
|
c5103eea181455afded264528bb97ac8a9982db0 | enable/__init__.py | enable/__init__.py | # Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from __future__ import absolute_import
from ._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
| # Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from enable._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
| Use an absolute import to avoid breaking the docs build. | Use an absolute import to avoid breaking the docs build.
| Python | bsd-3-clause | tommy-u/enable,tommy-u/enable,tommy-u/enable,tommy-u/enable | # Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from __future__ import absolute_import
from ._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
Use an absolute import to avoid breaking the docs build. | # Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from enable._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
| <commit_before># Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from __future__ import absolute_import
from ._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
<commit_msg>Use an absolute import to avoid breaking the docs build.<commit_after> | # Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from enable._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
| # Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from __future__ import absolute_import
from ._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
Use an absolute import to avoid breaking the docs build.# Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from enable._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
| <commit_before># Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from __future__ import absolute_import
from ._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
<commit_msg>Use an absolute import to avoid breaking the docs build.<commit_after># Copyright (c) 2007-2014 by Enthought, Inc.
# All rights reserved.
""" A multi-platform object drawing library.
Part of the Enable project of the Enthought Tool Suite.
"""
from enable._version import full_version as __version__
__requires__ = [
'traitsui',
'PIL',
'kiwisolver',
]
|
926c4662c7b3059503bd0a22ee9624bb39ab40fd | sharepa/__init__.py | sharepa/__init__.py | from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
from sharepa.helpers import source_agg, source_counts
| from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
| Remove helper functions from sharepa init | Remove helper functions from sharepa init
| Python | mit | samanehsan/sharepa,erinspace/sharepa,CenterForOpenScience/sharepa,fabianvf/sharepa | from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
from sharepa.helpers import source_agg, source_counts
Remove helper functions from sharepa init | from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
| <commit_before>from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
from sharepa.helpers import source_agg, source_counts
<commit_msg>Remove helper functions from sharepa init<commit_after> | from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
| from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
from sharepa.helpers import source_agg, source_counts
Remove helper functions from sharepa initfrom sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
| <commit_before>from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
from sharepa.helpers import source_agg, source_counts
<commit_msg>Remove helper functions from sharepa init<commit_after>from sharepa.search import ShareSearch, basic_search # noqa
from sharepa.analysis import bucket_to_dataframe, merge_dataframes # noqa
|
658839e09d17180d9c6564af8fc92f120a4b5bd8 | parserscripts/collect_accessions.py | parserscripts/collect_accessions.py | # -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file succesfully written out at:',args.o)
| # -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file successfully written out at:',args.o)
| Fix typo: `succesfully` -> `successfully` | Fix typo: `succesfully` -> `successfully`
| Python | mit | mbonsma/phageParser,phageParser/phageParser,goyalsid/phageParser,mbonsma/phageParser,mbonsma/phageParser,goyalsid/phageParser,goyalsid/phageParser,phageParser/phageParser,phageParser/phageParser,phageParser/phageParser,mbonsma/phageParser | # -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file succesfully written out at:',args.o)
Fix typo: `succesfully` -> `successfully` | # -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file successfully written out at:',args.o)
| <commit_before># -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file succesfully written out at:',args.o)
<commit_msg>Fix typo: `succesfully` -> `successfully`<commit_after> | # -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file successfully written out at:',args.o)
| # -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file succesfully written out at:',args.o)
Fix typo: `succesfully` -> `successfully`# -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file successfully written out at:',args.o)
| <commit_before># -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file succesfully written out at:',args.o)
<commit_msg>Fix typo: `succesfully` -> `successfully`<commit_after># -*- coding: utf-8 -*-
'''
Created on Tue Aug 15 11:26:23 2017
This script processes NCBI data exports (from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to extract lists of organism accession IDs, later used for downloading complete genome sequences.
@author: mbonsma / thisisjaid
'''
import os
import pandas as pd
import argparse
parser = argparse.ArgumentParser(add_help=True, description='''phageParser - collect_accessions.py -
This script processes NCBI data exports
(from ftp://ftp.ncbi.nlm.nih.gov/genomes/GENOME_REPORTS/IDS/)
to generate lists of organism accession IDs, later used for downloading
complete genome sequences''')
parser.add_argument('-o', metavar='output_file', action='store', type=str, default='accessions.csv',
help='Full path to results output file (default: accessions.csv)')
parser.add_argument('file', action='store', help='Full path to NCBI data export file')
args = parser.parse_args()
records = pd.read_csv(args.file,sep='\t', header=None)
records.to_csv(args.o,header=False,sep=',',columns=[1],index=False)
print('Accession file successfully written out at:',args.o)
|
2d534b7be13bda60646815e16a91e778da71c3f8 | auditlog/__manifest__.py | auditlog/__manifest__.py | # -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
'pre_init_hook': 'pre_init_hook',
}
| # -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
}
| Remove pre_init_hook reference from openerp, no pre_init hook exists any more | auditlog: Remove pre_init_hook reference from openerp, no pre_init hook exists any more
| Python | agpl-3.0 | Vauxoo/server-tools,Vauxoo/server-tools,Vauxoo/server-tools | # -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
'pre_init_hook': 'pre_init_hook',
}
auditlog: Remove pre_init_hook reference from openerp, no pre_init hook exists any more | # -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
}
| <commit_before># -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
'pre_init_hook': 'pre_init_hook',
}
<commit_msg>auditlog: Remove pre_init_hook reference from openerp, no pre_init hook exists any more<commit_after> | # -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
}
| # -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
'pre_init_hook': 'pre_init_hook',
}
auditlog: Remove pre_init_hook reference from openerp, no pre_init hook exists any more# -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
}
| <commit_before># -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
'pre_init_hook': 'pre_init_hook',
}
<commit_msg>auditlog: Remove pre_init_hook reference from openerp, no pre_init hook exists any more<commit_after># -*- coding: utf-8 -*-
# © 2015 ABF OSIELL <http://osiell.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': "Audit Log",
'version': "9.0.1.0.0",
'author': "ABF OSIELL,Odoo Community Association (OCA)",
'license': "AGPL-3",
'website': "http://www.osiell.com",
'category': "Tools",
'depends': [
'base',
],
'data': [
'security/ir.model.access.csv',
'data/ir_cron.xml',
'views/auditlog_view.xml',
'views/http_session_view.xml',
'views/http_request_view.xml',
],
'images': [],
'application': True,
'installable': True,
}
|
b02c36bc23af41ee82414f36eb6cf20ffa5a4a46 | edx_data_research/reporting/basic/user_info.py | edx_data_research/reporting/basic/user_info.py | '''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
username = edx_obj.collections['user_id_map'].find_one({'id' : user_id})['username']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, item['name'], final_grade, username, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID','Name', 'Final Grade', 'Username', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
| '''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
user_id_map = edx_obj.collections['user_id_map'].find_one({'id' : user_id})
username = user_id_map['username']
hash_id = user_id_map['hash_id']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, username, hash_id, item['name'], final_grade, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except KeyError:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID', 'Username', 'User Hash ID', 'Name', 'Final Grade', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
| Add hash_id column to user-info output | Add hash_id column to user-info output
| Python | mit | McGillX/edx_data_research,McGillX/edx_data_research,McGillX/edx_data_research | '''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
username = edx_obj.collections['user_id_map'].find_one({'id' : user_id})['username']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, item['name'], final_grade, username, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID','Name', 'Final Grade', 'Username', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
Add hash_id column to user-info output | '''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
user_id_map = edx_obj.collections['user_id_map'].find_one({'id' : user_id})
username = user_id_map['username']
hash_id = user_id_map['hash_id']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, username, hash_id, item['name'], final_grade, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except KeyError:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID', 'Username', 'User Hash ID', 'Name', 'Final Grade', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
| <commit_before>'''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
username = edx_obj.collections['user_id_map'].find_one({'id' : user_id})['username']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, item['name'], final_grade, username, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID','Name', 'Final Grade', 'Username', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
<commit_msg>Add hash_id column to user-info output<commit_after> | '''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
user_id_map = edx_obj.collections['user_id_map'].find_one({'id' : user_id})
username = user_id_map['username']
hash_id = user_id_map['hash_id']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, username, hash_id, item['name'], final_grade, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except KeyError:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID', 'Username', 'User Hash ID', 'Name', 'Final Grade', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
| '''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
username = edx_obj.collections['user_id_map'].find_one({'id' : user_id})['username']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, item['name'], final_grade, username, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID','Name', 'Final Grade', 'Username', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
Add hash_id column to user-info output'''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
user_id_map = edx_obj.collections['user_id_map'].find_one({'id' : user_id})
username = user_id_map['username']
hash_id = user_id_map['hash_id']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, username, hash_id, item['name'], final_grade, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except KeyError:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID', 'Username', 'User Hash ID', 'Name', 'Final Grade', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
| <commit_before>'''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
username = edx_obj.collections['user_id_map'].find_one({'id' : user_id})['username']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, item['name'], final_grade, username, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID','Name', 'Final Grade', 'Username', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
<commit_msg>Add hash_id column to user-info output<commit_after>'''
This module will retrieve info about students registered in the course
'''
def user_info(edx_obj):
edx_obj.collections = ['certificates_generatedcertificate', 'auth_userprofile','user_id_map','student_courseenrollment']
cursor = edx_obj.collections['auth_userprofile'].find()
result = []
for item in cursor:
user_id = item['user_id']
try:
final_grade = edx_obj.collections['certificates_generatedcertificate'].find_one({'user_id' : user_id})['grade']
user_id_map = edx_obj.collections['user_id_map'].find_one({'id' : user_id})
username = user_id_map['username']
hash_id = user_id_map['hash_id']
enrollment_date = edx_obj.collections['student_courseenrollment'].find_one({'user_id' : user_id})['created']
result.append([user_id, username, hash_id, item['name'], final_grade, item['gender'], item['year_of_birth'], item['level_of_education'], item['country'], item['city'], enrollment_date])
except KeyError:
print "Exception occurred for user_id {0}".format(user_id)
edx_obj.generate_csv(result, ['User ID', 'Username', 'User Hash ID', 'Name', 'Final Grade', 'Gender', 'Year of Birth', 'Level of Education', 'Country', 'City','Enrollment Date'], output_file=edx_obj.db.name+'_user_info.csv')
|
1607082857eb38da54d448ef1a6f48a8459d44a4 | imager/ImagerProfile/admin.py | imager/ImagerProfile/admin.py | from django.contrib import admin
from ImagerProfile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| from django.contrib import admin
from imagerprofile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| Change app name in import to lowercase | Change app name in import to lowercase
| Python | mit | nbeck90/django-imager,nbeck90/django-imager | from django.contrib import admin
from ImagerProfile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
Change app name in import to lowercase | from django.contrib import admin
from imagerprofile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| <commit_before>from django.contrib import admin
from ImagerProfile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
<commit_msg>Change app name in import to lowercase<commit_after> | from django.contrib import admin
from imagerprofile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| from django.contrib import admin
from ImagerProfile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
Change app name in import to lowercasefrom django.contrib import admin
from imagerprofile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| <commit_before>from django.contrib import admin
from ImagerProfile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
<commit_msg>Change app name in import to lowercase<commit_after>from django.contrib import admin
from imagerprofile.models import ImagerProfile
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# admin.site.unregister(User)
class ImagerUserInline(admin.StackedInline):
model = ImagerProfile
can_delete = False
verbose_name_plural = 'imager user'
class UserAdmin(UserAdmin):
inlines = (ImagerUserInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
|
8af2699f6d0f190081254243e2c90369b4541e34 | happening/db.py | happening/db.py | """ Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
| """ Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ or 'travis' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
| Make travis tests work with sites | Make travis tests work with sites
| Python | mit | happeninghq/happening,happeninghq/happening,jscott1989/happening,happeninghq/happening,jscott1989/happening,jscott1989/happening,jscott1989/happening,happeninghq/happening | """ Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
Make travis tests work with sites | """ Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ or 'travis' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
| <commit_before>""" Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
<commit_msg>Make travis tests work with sites<commit_after> | """ Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ or 'travis' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
| """ Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
Make travis tests work with sites""" Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ or 'travis' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
| <commit_before>""" Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
<commit_msg>Make travis tests work with sites<commit_after>""" Custom happening database manipulation. """
from django.contrib.sites.managers import CurrentSiteManager as Manager
from django.db import models
from django.contrib.sites.models import Site
from multihost import sites
import os
class Model(models.Model):
""" Custom model for use in happening.
Ensures that models are owned by a particular site.
"""
class Meta:
abstract = True
site = models.ForeignKey(Site)
objects = Manager()
def save(self, *args, **kwargs):
""" Ensure there is a site for this instance. """
if 'scdtest' in os.environ or 'travis' in os.environ:
self.site = Site.objects.first()
else:
self.site = sites.by_host()
return super(Model, self).save(*args, **kwargs)
|
db6222adea234921f82a843846778f5327566aaf | homebrew/logger.py | homebrew/logger.py | import logging
import sys
logger = logging.getLogger()
logFormatter = logging.Formatter("%(message)s")
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
| import logging
import sys
logger = logging.getLogger()
formatter = logging.Formatter("%(message)s")
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
| Rename variables used for setting up logging | Rename variables used for setting up logging
| Python | isc | igroen/homebrew | import logging
import sys
logger = logging.getLogger()
logFormatter = logging.Formatter("%(message)s")
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
Rename variables used for setting up logging | import logging
import sys
logger = logging.getLogger()
formatter = logging.Formatter("%(message)s")
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
| <commit_before>import logging
import sys
logger = logging.getLogger()
logFormatter = logging.Formatter("%(message)s")
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
<commit_msg>Rename variables used for setting up logging<commit_after> | import logging
import sys
logger = logging.getLogger()
formatter = logging.Formatter("%(message)s")
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
| import logging
import sys
logger = logging.getLogger()
logFormatter = logging.Formatter("%(message)s")
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
Rename variables used for setting up loggingimport logging
import sys
logger = logging.getLogger()
formatter = logging.Formatter("%(message)s")
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
| <commit_before>import logging
import sys
logger = logging.getLogger()
logFormatter = logging.Formatter("%(message)s")
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
<commit_msg>Rename variables used for setting up logging<commit_after>import logging
import sys
logger = logging.getLogger()
formatter = logging.Formatter("%(message)s")
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
UNDERLINE_SYMBOL = "-"
def log_title(logline):
logger.info(logline)
logger.info(len(logline) * UNDERLINE_SYMBOL)
def log_blank_line():
logger.info("")
def log(
installed,
packages_not_needed_by_other,
packages_needed_by_other,
package_dependencies,
):
log_title("Installed packages:")
logger.info(", ".join(sorted(installed)))
log_blank_line()
log_title("No package depends on these packages:")
logger.info(", ".join(sorted(packages_not_needed_by_other)))
log_blank_line()
log_title("These packages are needed by other packages:")
for package, needed_by in sorted(packages_needed_by_other.items()):
logger.info("Package {} is needed by: {}".format(package, ", ".join(needed_by)))
log_blank_line()
log_title("These packages depend on other packages:")
for package, package_dependencies in sorted(package_dependencies.items()):
logger.info(
"Package {} depends on: {}".format(package, ", ".join(package_dependencies))
)
log_blank_line()
|
7558f0ed7c14cd4a1cfb87fdefc631adf8d1aff0 | server/dummy/dummy_server.py | server/dummy/dummy_server.py | #!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
self.send_response(200)
self.end_headers()
varLen = int(self.headers['Content-Length'])
postVars = self.rfile.read(varLen)
print postVars
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
| #!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '\n---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
content_length = int(self.headers['Content-Length'])
content = self.rfile.read(content_length)
print 'content:\n\n', content, '\n'
self.send_response(200)
self.end_headers()
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
| Clean up post formatting and response code | Clean up post formatting and response code
| Python | mit | jonspeicher/Puddle,jonspeicher/Puddle,jonspeicher/Puddle | #!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
self.send_response(200)
self.end_headers()
varLen = int(self.headers['Content-Length'])
postVars = self.rfile.read(varLen)
print postVars
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
Clean up post formatting and response code | #!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '\n---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
content_length = int(self.headers['Content-Length'])
content = self.rfile.read(content_length)
print 'content:\n\n', content, '\n'
self.send_response(200)
self.end_headers()
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
| <commit_before>#!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
self.send_response(200)
self.end_headers()
varLen = int(self.headers['Content-Length'])
postVars = self.rfile.read(varLen)
print postVars
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
<commit_msg>Clean up post formatting and response code<commit_after> | #!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '\n---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
content_length = int(self.headers['Content-Length'])
content = self.rfile.read(content_length)
print 'content:\n\n', content, '\n'
self.send_response(200)
self.end_headers()
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
| #!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
self.send_response(200)
self.end_headers()
varLen = int(self.headers['Content-Length'])
postVars = self.rfile.read(varLen)
print postVars
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
Clean up post formatting and response code#!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '\n---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
content_length = int(self.headers['Content-Length'])
content = self.rfile.read(content_length)
print 'content:\n\n', content, '\n'
self.send_response(200)
self.end_headers()
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
| <commit_before>#!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
self.send_response(200)
self.end_headers()
varLen = int(self.headers['Content-Length'])
postVars = self.rfile.read(varLen)
print postVars
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
<commit_msg>Clean up post formatting and response code<commit_after>#!/usr/bin/env python
import BaseHTTPServer
ServerClass = BaseHTTPServer.HTTPServer
RequestHandlerClass = BaseHTTPServer.BaseHTTPRequestHandler
SERVER_NAME = ''
SERVER_PORT = 9000
class JsonPostResponder(RequestHandlerClass):
def do_POST(self):
print '\n---> dummy server: got post!'
print 'command:', self.command
print 'path:', self.path
print 'headers:\n\n', self.headers
content_length = int(self.headers['Content-Length'])
content = self.rfile.read(content_length)
print 'content:\n\n', content, '\n'
self.send_response(200)
self.end_headers()
server_address = (SERVER_NAME, SERVER_PORT)
httpd = BaseHTTPServer.HTTPServer(server_address, JsonPostResponder)
httpd.serve_forever()
|
49015e6d1a4c8670172ea00776e168f6cec0092b | openedx/core/djangoapps/user_api/accounts/forms.py | openedx/core/djangoapps/user_api/accounts/forms.py | """
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
| """
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
from openedx.core.djangoapps.user_api.accounts.utils import generate_password
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
# Reset users password so they can request a password reset and log in again.
retirement.user.set_password(generate_password(length=25))
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
| Reset learners password when user is unGDPRed/unretired via django admin. | Reset learners password when user is unGDPRed/unretired via django admin.
| Python | agpl-3.0 | cpennington/edx-platform,a-parhom/edx-platform,philanthropy-u/edx-platform,arbrandes/edx-platform,msegado/edx-platform,eduNEXT/edunext-platform,ESOedX/edx-platform,philanthropy-u/edx-platform,jolyonb/edx-platform,jolyonb/edx-platform,edx/edx-platform,stvstnfrd/edx-platform,EDUlib/edx-platform,edx/edx-platform,angelapper/edx-platform,ESOedX/edx-platform,a-parhom/edx-platform,mitocw/edx-platform,arbrandes/edx-platform,jolyonb/edx-platform,msegado/edx-platform,arbrandes/edx-platform,stvstnfrd/edx-platform,angelapper/edx-platform,angelapper/edx-platform,msegado/edx-platform,a-parhom/edx-platform,cpennington/edx-platform,eduNEXT/edx-platform,philanthropy-u/edx-platform,edx/edx-platform,cpennington/edx-platform,ESOedX/edx-platform,appsembler/edx-platform,edx-solutions/edx-platform,a-parhom/edx-platform,EDUlib/edx-platform,msegado/edx-platform,EDUlib/edx-platform,stvstnfrd/edx-platform,eduNEXT/edunext-platform,mitocw/edx-platform,mitocw/edx-platform,mitocw/edx-platform,edx-solutions/edx-platform,eduNEXT/edx-platform,stvstnfrd/edx-platform,angelapper/edx-platform,appsembler/edx-platform,appsembler/edx-platform,msegado/edx-platform,jolyonb/edx-platform,edx-solutions/edx-platform,eduNEXT/edx-platform,edx/edx-platform,ESOedX/edx-platform,edx-solutions/edx-platform,appsembler/edx-platform,arbrandes/edx-platform,EDUlib/edx-platform,eduNEXT/edx-platform,eduNEXT/edunext-platform,cpennington/edx-platform,eduNEXT/edunext-platform,philanthropy-u/edx-platform | """
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
Reset learners password when user is unGDPRed/unretired via django admin. | """
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
from openedx.core.djangoapps.user_api.accounts.utils import generate_password
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
# Reset users password so they can request a password reset and log in again.
retirement.user.set_password(generate_password(length=25))
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
| <commit_before>"""
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
<commit_msg>Reset learners password when user is unGDPRed/unretired via django admin.<commit_after> | """
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
from openedx.core.djangoapps.user_api.accounts.utils import generate_password
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
# Reset users password so they can request a password reset and log in again.
retirement.user.set_password(generate_password(length=25))
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
| """
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
Reset learners password when user is unGDPRed/unretired via django admin."""
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
from openedx.core.djangoapps.user_api.accounts.utils import generate_password
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
# Reset users password so they can request a password reset and log in again.
retirement.user.set_password(generate_password(length=25))
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
| <commit_before>"""
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
<commit_msg>Reset learners password when user is unGDPRed/unretired via django admin.<commit_after>"""
Django forms for accounts
"""
from django import forms
from django.core.exceptions import ValidationError
from openedx.core.djangoapps.user_api.accounts.utils import generate_password
class RetirementQueueDeletionForm(forms.Form):
"""
Admin form to facilitate learner retirement cancellation
"""
cancel_retirement = forms.BooleanField(required=True)
def save(self, retirement):
"""
When the form is POSTed we double-check the retirment status
and perform the necessary steps to cancel the retirement
request.
"""
if retirement.current_state.state_name != 'PENDING':
self.add_error(
None,
# Translators: 'current_state' is a string from an enumerated list indicating the learner's retirement
# state. Example: FORUMS_COMPLETE
"Retirement requests can only be cancelled for users in the PENDING state."
" Current request state for '{original_username}': {current_state}".format(
original_username=retirement.original_username,
current_state=retirement.current_state.state_name
)
)
raise ValidationError('Retirement is in the wrong state!')
# Load the user record using the retired email address -and- change the email address back.
retirement.user.email = retirement.original_email
# Reset users password so they can request a password reset and log in again.
retirement.user.set_password(generate_password(length=25))
retirement.user.save()
# Delete the user retirement status record.
# No need to delete the accompanying "permanent" retirement request record - it gets done via Django signal.
retirement.delete()
|
d66a412efad62d47e7df8d2ff4922be4c268a93e | hunittest/utils.py | hunittest/utils.py | # -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
| # -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
import unittest
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
def load_single_test_case(test_name):
test_suite = list(unittest.defaultTestLoader.loadTestsFromName(test_name))
assert len(test_suite) == 1
return test_suite[0]
| Add helper to load a single test case. | Add helper to load a single test case.
| Python | bsd-2-clause | nicolasdespres/hunittest | # -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
Add helper to load a single test case. | # -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
import unittest
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
def load_single_test_case(test_name):
test_suite = list(unittest.defaultTestLoader.loadTestsFromName(test_name))
assert len(test_suite) == 1
return test_suite[0]
| <commit_before># -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
<commit_msg>Add helper to load a single test case.<commit_after> | # -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
import unittest
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
def load_single_test_case(test_name):
test_suite = list(unittest.defaultTestLoader.loadTestsFromName(test_name))
assert len(test_suite) == 1
return test_suite[0]
| # -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
Add helper to load a single test case.# -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
import unittest
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
def load_single_test_case(test_name):
test_suite = list(unittest.defaultTestLoader.loadTestsFromName(test_name))
assert len(test_suite) == 1
return test_suite[0]
| <commit_before># -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
<commit_msg>Add helper to load a single test case.<commit_after># -*- encoding: utf-8 -*-
"""Utility routines
"""
import os
import re
from enum import Enum
from contextlib import contextmanager
import unittest
def pyname_join(seq):
return ".".join(seq)
def is_pkgdir(dirpath):
return os.path.isdir(dirpath) \
and os.path.isfile(os.path.join(dirpath, "__init__.py"))
def mod_split(modname):
mo = re.match(r"^(.+)\.(.*)$", modname)
if not mo:
raise ValueError("invalid python path identifier")
return (mo.group(1), mo.group(2))
def is_empty_generator(generator):
try:
next(generator)
except StopIteration:
return True
else:
return False
class AutoEnum(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def mkdir_p(path):
try:
os.makedirs(path)
except FileExistsError:
pass
@contextmanager
def protect_cwd(dirpath=None):
saved_cwd = os.getcwd()
if dirpath is not None:
os.chdir(dirpath)
try:
yield
finally:
os.chdir(saved_cwd)
def safe_getcwd():
try:
return os.getcwd()
except FileNotFoundError:
return None
def load_single_test_case(test_name):
test_suite = list(unittest.defaultTestLoader.loadTestsFromName(test_name))
assert len(test_suite) == 1
return test_suite[0]
|
4de3b357101b3c304a6d89fd02175156ffecc656 | src/mailme/constants.py | src/mailme/constants.py |
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
|
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'sent items': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
| Add 'sent items' to default folder mapping | Add 'sent items' to default folder mapping
| Python | bsd-3-clause | mailme/mailme,mailme/mailme |
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
Add 'sent items' to default folder mapping |
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'sent items': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
| <commit_before>
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
<commit_msg>Add 'sent items' to default folder mapping<commit_after> |
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'sent items': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
|
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
Add 'sent items' to default folder mapping
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'sent items': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
| <commit_before>
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
<commit_msg>Add 'sent items' to default folder mapping<commit_after>
# Folder name mappings, based on http://tools.ietf.org/html/rfc6154
INBOX = 'inbox'
DRAFTS = 'drafts'
SPAM = 'spam'
ARCHIVE = 'archive'
SENT = 'sent'
TRASH = 'trash'
ALL = 'all'
IMPORTANT = 'important'
# Default mapping to unify various provider behaviors
DEFAULT_FOLDER_MAPPING = {
'inbox': INBOX,
'drafts': DRAFTS,
'draft': DRAFTS,
'junk': SPAM,
'spam': SPAM,
'archive': ARCHIVE,
'sent': SENT,
'sent items': SENT,
'trash': TRASH,
'all': ALL,
'important': IMPORTANT,
}
DEFAULT_FOLDER_FLAGS = {
'\\Trash': 'trash',
'\\Sent': 'sent',
'\\Drafts': 'drafts',
'\\Junk': 'spam',
'\\Inbox': 'inbox',
'\\Spam': 'spam'
}
|
75632e699b6b83eba3d87506b2fed2de45f695bc | ai/STA/Strategy/stay_away.py | ai/STA/Strategy/stay_away.py | # Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
roles_to_consider = [Role.FIRST_ATTACK, Role.SECOND_ATTACK, Role.MIDDLE,
Role.FIRST_DEFENCE, Role.SECOND_DEFENCE, Role.GOALKEEPER]
role_by_robots = [(i, self.game_state.get_player_by_role(i)) for i in roles_to_consider]
for index, player in role_by_robots:
if player:
self.add_tactic(index, StayAwayFromBall(self.game_state, player))
| # Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
for r in Role:
p = self.game_state.get_player_by_role(r)
if p:
self.add_tactic(r, StayAwayFromBall(self.game_state, p)) | Fix StayAway strat for pull request | Fix StayAway strat for pull request
| Python | mit | RoboCupULaval/StrategyIA,RoboCupULaval/StrategyIA | # Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
roles_to_consider = [Role.FIRST_ATTACK, Role.SECOND_ATTACK, Role.MIDDLE,
Role.FIRST_DEFENCE, Role.SECOND_DEFENCE, Role.GOALKEEPER]
role_by_robots = [(i, self.game_state.get_player_by_role(i)) for i in roles_to_consider]
for index, player in role_by_robots:
if player:
self.add_tactic(index, StayAwayFromBall(self.game_state, player))
Fix StayAway strat for pull request | # Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
for r in Role:
p = self.game_state.get_player_by_role(r)
if p:
self.add_tactic(r, StayAwayFromBall(self.game_state, p)) | <commit_before># Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
roles_to_consider = [Role.FIRST_ATTACK, Role.SECOND_ATTACK, Role.MIDDLE,
Role.FIRST_DEFENCE, Role.SECOND_DEFENCE, Role.GOALKEEPER]
role_by_robots = [(i, self.game_state.get_player_by_role(i)) for i in roles_to_consider]
for index, player in role_by_robots:
if player:
self.add_tactic(index, StayAwayFromBall(self.game_state, player))
<commit_msg>Fix StayAway strat for pull request<commit_after> | # Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
for r in Role:
p = self.game_state.get_player_by_role(r)
if p:
self.add_tactic(r, StayAwayFromBall(self.game_state, p)) | # Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
roles_to_consider = [Role.FIRST_ATTACK, Role.SECOND_ATTACK, Role.MIDDLE,
Role.FIRST_DEFENCE, Role.SECOND_DEFENCE, Role.GOALKEEPER]
role_by_robots = [(i, self.game_state.get_player_by_role(i)) for i in roles_to_consider]
for index, player in role_by_robots:
if player:
self.add_tactic(index, StayAwayFromBall(self.game_state, player))
Fix StayAway strat for pull request# Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
for r in Role:
p = self.game_state.get_player_by_role(r)
if p:
self.add_tactic(r, StayAwayFromBall(self.game_state, p)) | <commit_before># Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
roles_to_consider = [Role.FIRST_ATTACK, Role.SECOND_ATTACK, Role.MIDDLE,
Role.FIRST_DEFENCE, Role.SECOND_DEFENCE, Role.GOALKEEPER]
role_by_robots = [(i, self.game_state.get_player_by_role(i)) for i in roles_to_consider]
for index, player in role_by_robots:
if player:
self.add_tactic(index, StayAwayFromBall(self.game_state, player))
<commit_msg>Fix StayAway strat for pull request<commit_after># Under MIT license, see LICENSE.txt
from ai.STA.Tactic.stay_away_from_ball import StayAwayFromBall
from ai.STA.Strategy.Strategy import Strategy
from ai.Util.role import Role
class StayAway(Strategy):
def __init__(self, p_game_state):
super().__init__(p_game_state)
for r in Role:
p = self.game_state.get_player_by_role(r)
if p:
self.add_tactic(r, StayAwayFromBall(self.game_state, p)) |
af56c549a8eae5ebb0d124e2bb397241f11e47af | indico/__init__.py | indico/__init__.py | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
_patch_wtforms_sqlalchemy()
del _patch_wtforms_sqlalchemy
| # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
try:
_patch_wtforms_sqlalchemy()
except ImportError as exc:
# pip seems to run this sometimes while uninstalling an old sqlalchemy version
print 'Could not monkeypatch wtforms', exc
finally:
del _patch_wtforms_sqlalchemy
| Fix pip install failing due to wtforms monkeypatch | Fix pip install failing due to wtforms monkeypatch
| Python | mit | OmeGak/indico,mvidalgarcia/indico,mvidalgarcia/indico,indico/indico,ThiefMaster/indico,DirkHoffmann/indico,OmeGak/indico,pferreir/indico,pferreir/indico,OmeGak/indico,mvidalgarcia/indico,DirkHoffmann/indico,ThiefMaster/indico,indico/indico,ThiefMaster/indico,DirkHoffmann/indico,mic4ael/indico,mic4ael/indico,mvidalgarcia/indico,ThiefMaster/indico,pferreir/indico,DirkHoffmann/indico,indico/indico,OmeGak/indico,indico/indico,mic4ael/indico,pferreir/indico,mic4ael/indico | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
_patch_wtforms_sqlalchemy()
del _patch_wtforms_sqlalchemy
Fix pip install failing due to wtforms monkeypatch | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
try:
_patch_wtforms_sqlalchemy()
except ImportError as exc:
# pip seems to run this sometimes while uninstalling an old sqlalchemy version
print 'Could not monkeypatch wtforms', exc
finally:
del _patch_wtforms_sqlalchemy
| <commit_before># This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
_patch_wtforms_sqlalchemy()
del _patch_wtforms_sqlalchemy
<commit_msg>Fix pip install failing due to wtforms monkeypatch<commit_after> | # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
try:
_patch_wtforms_sqlalchemy()
except ImportError as exc:
# pip seems to run this sometimes while uninstalling an old sqlalchemy version
print 'Could not monkeypatch wtforms', exc
finally:
del _patch_wtforms_sqlalchemy
| # This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
_patch_wtforms_sqlalchemy()
del _patch_wtforms_sqlalchemy
Fix pip install failing due to wtforms monkeypatch# This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
try:
_patch_wtforms_sqlalchemy()
except ImportError as exc:
# pip seems to run this sometimes while uninstalling an old sqlalchemy version
print 'Could not monkeypatch wtforms', exc
finally:
del _patch_wtforms_sqlalchemy
| <commit_before># This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
_patch_wtforms_sqlalchemy()
del _patch_wtforms_sqlalchemy
<commit_msg>Fix pip install failing due to wtforms monkeypatch<commit_after># This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.1-dev'
register_custom_mimetypes()
# monkeypatch for https://github.com/wtforms/wtforms/issues/373
def _patch_wtforms_sqlalchemy():
from wtforms.ext.sqlalchemy import fields
from sqlalchemy.orm.util import identity_key
def get_pk_from_identity(obj):
key = identity_key(instance=obj)[1]
return u':'.join(map(unicode, key))
fields.get_pk_from_identity = get_pk_from_identity
try:
_patch_wtforms_sqlalchemy()
except ImportError as exc:
# pip seems to run this sometimes while uninstalling an old sqlalchemy version
print 'Could not monkeypatch wtforms', exc
finally:
del _patch_wtforms_sqlalchemy
|
275301d7a2c2e8c44ff1cfb3d49d9388f9531b56 | invalidate_data.py | invalidate_data.py | #!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
failed_trials = db_client.trials_collection.find({'success': False}, {'_id': True})
for trial_id in failed_trials:
logging.getLogger(__name__).info("Invalidating failed trial {0}".format(trial_id['_id']))
batch_analysis.invalidate.invalidate_trial_result(db_client, trial_id['_id'])
if __name__ == '__main__':
main()
| Update invalidate to remove failed trials. | Update invalidate to remove failed trials.
| Python | bsd-2-clause | jskinn/robot-vision-experiment-framework,jskinn/robot-vision-experiment-framework | #!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
if __name__ == '__main__':
main()
Update invalidate to remove failed trials. | #!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
failed_trials = db_client.trials_collection.find({'success': False}, {'_id': True})
for trial_id in failed_trials:
logging.getLogger(__name__).info("Invalidating failed trial {0}".format(trial_id['_id']))
batch_analysis.invalidate.invalidate_trial_result(db_client, trial_id['_id'])
if __name__ == '__main__':
main()
| <commit_before>#!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
if __name__ == '__main__':
main()
<commit_msg>Update invalidate to remove failed trials.<commit_after> | #!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
failed_trials = db_client.trials_collection.find({'success': False}, {'_id': True})
for trial_id in failed_trials:
logging.getLogger(__name__).info("Invalidating failed trial {0}".format(trial_id['_id']))
batch_analysis.invalidate.invalidate_trial_result(db_client, trial_id['_id'])
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
if __name__ == '__main__':
main()
Update invalidate to remove failed trials.#!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
failed_trials = db_client.trials_collection.find({'success': False}, {'_id': True})
for trial_id in failed_trials:
logging.getLogger(__name__).info("Invalidating failed trial {0}".format(trial_id['_id']))
batch_analysis.invalidate.invalidate_trial_result(db_client, trial_id['_id'])
if __name__ == '__main__':
main()
| <commit_before>#!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
if __name__ == '__main__':
main()
<commit_msg>Update invalidate to remove failed trials.<commit_after>#!/usr/bin/env python3
# Copyright (c) 2017, John Skinner
import sys
import logging
import logging.config
import config.global_configuration as global_conf
import database.client
import batch_analysis.invalidate
def main():
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
config = global_conf.load_global_config('config.yml')
if __name__ == '__main__':
# Only configure the logging if this is the main function, don't reconfigure
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
orbslam_ids = db_client.system_collection.find({'_type': 'systems.slam.orbslam2.ORBSLAM2'}, {'_id': True})
for system_id in orbslam_ids:
logging.getLogger(__name__).info("Invalidating system {0}".format(system_id['_id']))
batch_analysis.invalidate.invalidate_system(db_client, system_id['_id'])
failed_trials = db_client.trials_collection.find({'success': False}, {'_id': True})
for trial_id in failed_trials:
logging.getLogger(__name__).info("Invalidating failed trial {0}".format(trial_id['_id']))
batch_analysis.invalidate.invalidate_trial_result(db_client, trial_id['_id'])
if __name__ == '__main__':
main()
|
ec28eadeab215533cec1b14b627e2793aa7f4f31 | tt_dailyemailblast/send_backends/sync.py | tt_dailyemailblast/send_backends/sync.py | from . import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
| from .. import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
| Fix email module could not be imported | Fix email module could not be imported
This helpful message alerted me to this problem:
ImproperlyConfigured: <django.utils.functional.__proxy__ object at
0x10b023990>
| Python | apache-2.0 | texastribune/tt_dailyemailblast,texastribune/tt_dailyemailblast | from . import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
Fix email module could not be imported
This helpful message alerted me to this problem:
ImproperlyConfigured: <django.utils.functional.__proxy__ object at
0x10b023990> | from .. import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
| <commit_before>from . import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
<commit_msg>Fix email module could not be imported
This helpful message alerted me to this problem:
ImproperlyConfigured: <django.utils.functional.__proxy__ object at
0x10b023990><commit_after> | from .. import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
| from . import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
Fix email module could not be imported
This helpful message alerted me to this problem:
ImproperlyConfigured: <django.utils.functional.__proxy__ object at
0x10b023990>from .. import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
| <commit_before>from . import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
<commit_msg>Fix email module could not be imported
This helpful message alerted me to this problem:
ImproperlyConfigured: <django.utils.functional.__proxy__ object at
0x10b023990><commit_after>from .. import email
def sync_daily_email_blasts(blast):
for l in blast.recipients_lists.all():
l.send(blast)
def sync_recipients_list(recipients_list, blast):
for r in recipients_list.recipientss.all():
r.send(recipients_list, blast)
def sync_recipient(recipient, recipients_list, blast):
email.send_email(blast.render(recipient, recipients_list))
|
4946adddd889db89d65764f3a680ccc6853ea949 | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'www.odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
| # -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Hidden/Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'http://odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
| Update category and website on module description | Update category and website on module description
| Python | agpl-3.0 | xcgd/document_attachment | # -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'www.odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
Update category and website on module description | # -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Hidden/Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'http://odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
| <commit_before># -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'www.odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
<commit_msg>Update category and website on module description<commit_after> | # -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Hidden/Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'http://odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
| # -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'www.odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
Update category and website on module description# -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Hidden/Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'http://odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
| <commit_before># -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'www.odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
<commit_msg>Update category and website on module description<commit_after># -*- coding: utf-8 -*-
{
'name': 'Document Attachment',
'version': '1.2',
'author': 'XCG Consulting',
'category': 'Hidden/Dependency',
'description': """Enchancements to the ir.attachment module
to manage kinds of attachments that can be linked with OpenERP objects.
The implenter has to:
- Pass 'res_model' and 'res_id' in the context.
- Define menus and actions should it want to allow changing document types.
Document attachments are displayed in a many2many field; it can optionally be
changed to work like a one2many field by using the
"domain="[('res_id', '=', id)]" attribute.
""",
'website': 'http://odoo.consulting/',
'depends': [
'base',
'document',
],
'data': [
'security/ir.model.access.csv',
'document_attachment.xml',
],
'test': [
],
'installable': True,
}
|
9ee00a148763c7caac1ae0d7dcb3efa496121ee7 | lamana/__init__.py | lamana/__init__.py | # -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
| # -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11.dev0' # PEP 440 style
##__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
| Modify dev versioning; see PEP 440 | Modify dev versioning; see PEP 440
| Python | bsd-3-clause | par2/lamana | # -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
Modify dev versioning; see PEP 440 | # -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11.dev0' # PEP 440 style
##__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
| <commit_before># -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
<commit_msg>Modify dev versioning; see PEP 440<commit_after> | # -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11.dev0' # PEP 440 style
##__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
| # -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
Modify dev versioning; see PEP 440# -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11.dev0' # PEP 440 style
##__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
| <commit_before># -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
<commit_msg>Modify dev versioning; see PEP 440<commit_after># -----------------------------------------------------------------------------
__title__ = 'lamana'
__version__ = '0.4.11.dev0' # PEP 440 style
##__version__ = '0.4.11-dev'
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts
|
e6e40df6e23f6623c4672b9ec3aab982f5588c8c | downstream-farmer/client.py | downstream-farmer/client.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
    """Client for a downstream node's HTTP API.

    All operations except challenge retrieval and random selection are
    stubs that raise NotImplementedError.
    """

    def __init__(self, server_url):
        # Strip surrounding '/' so endpoint paths join with one slash.
        self.server = server_url.strip('/')
        self.challenges = []  # Challenge objects fetched from the node

    def connect(self, url):
        """Connect to a node. Not implemented yet."""
        raise NotImplementedError

    def store_path(self, path):
        """Store a local path on the node. Not implemented yet."""
        raise NotImplementedError

    def get_chunk(self, hash):
        """Retrieve a chunk by its hash. Not implemented yet."""
        raise NotImplementedError

    def challenge(self, hash, challenge):
        """Issue a challenge for a hash. Not implemented yet."""
        raise NotImplementedError

    def answer(self, hash, hash_answer):
        """Submit a challenge answer. Not implemented yet."""
        raise NotImplementedError

    def _enc_fname(self, filename):
        # URL-encode just the basename of `filename`.
        return urlify(os.path.split(filename)[1])

    def get_challenges(self, filename):
        """Fetch challenges for `filename` and append them to self.challenges.

        Raises:
            DownstreamError: if the node responds with an HTTP error status.
        """
        # Reuse the helper instead of duplicating its logic inline.
        enc_fname = self._enc_fname(filename)
        url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
        resp = requests.get(url)
        try:
            resp.raise_for_status()
        except Exception as e:
            # str(e) instead of e.message: the `message` attribute was
            # removed from exceptions in Python 3 and would itself raise
            # AttributeError here.
            raise DownstreamError("Error connecting to downstream"
                                  "-node:", str(e))
        _json = resp.json()
        for challenge in _json['challenges']:
            chal = Challenge(challenge.get('block'), challenge.get('seed'))
            self.challenges.append(chal)

    def answer_challenge(self, filename):
        """Answer a challenge for `filename`. Not implemented yet."""
        enc_fname = self._enc_fname(filename)
        raise NotImplementedError

    def random_challenge(self):
        """Return a randomly chosen stored challenge.

        Raises IndexError if no challenges have been fetched yet.
        """
        # Bug fix: the original discarded random.choice's result.
        return random.choice(self.challenges)
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
    """Talks to a downstream node over its HTTP API.

    Everything except challenge retrieval is still a stub.
    """

    def __init__(self, server_url):
        # Normalise the node URL once so endpoints join cleanly.
        self.server = server_url.strip('/')
        self.challenges = []
        self.heartbeat = None

    def connect(self, url):
        raise NotImplementedError

    def store_path(self, path):
        raise NotImplementedError

    def get_chunk(self, hash):
        raise NotImplementedError

    def challenge(self, hash, challenge):
        raise NotImplementedError

    def answer(self, hash, hash_answer):
        raise NotImplementedError

    def _enc_fname(self, filename):
        # URL-encode the basename only.
        basename = os.path.split(filename)[1]
        return urlify(basename)

    def get_challenges(self, filename):
        """Download the node's challenges for `filename` into self.challenges."""
        encoded = urlify(os.path.split(filename)[1])
        endpoint = '%s/api/downstream/challenge/%s' % (self.server, encoded)
        response = requests.get(endpoint)
        try:
            response.raise_for_status()
        except Exception as e:
            raise DownstreamError(
                "Error connecting to downstream-node:", e.message)
        payload = response.json()
        self.challenges.extend(
            Challenge(item.get('block'), item.get('seed'))
            for item in payload['challenges'])

    def answer_challenge(self, filename):
        enc_fname = self._enc_fname(filename)
        raise NotImplementedError

    def random_challenge(self):
        # Pick one of the stored challenges at random.
        random.choice(self.challenges)
| Python | mit | Storj/downstream-farmer | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
def __init__(self, server_url):
self.server = server_url.strip('/')
self.challenges = []
def connect(self, url):
raise NotImplementedError
def store_path(self, path):
raise NotImplementedError
def get_chunk(self, hash):
raise NotImplementedError
def challenge(self, hash, challenge):
raise NotImplementedError
def answer(self, hash, hash_answer):
raise NotImplementedError
def _enc_fname(self, filename):
return urlify(os.path.split(filename)[1])
def get_challenges(self, filename):
enc_fname = urlify(os.path.split(filename)[1])
url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
resp = requests.get(url)
try:
resp.raise_for_status()
except Exception as e:
raise DownstreamError("Error connecting to downstream"
"-node:", e.message)
_json = resp.json()
for challenge in _json['challenges']:
chal = Challenge(challenge.get('block'), challenge.get('seed'))
self.challenges.append(chal)
def answer_challenge(self, filename):
enc_fname = self._enc_fname(filename)
raise NotImplementedError
def random_challenge(self):
random.choice(self.challenges)Set heartbeat attrib in init | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
    """Client for a downstream node's HTTP API.

    All operations except challenge retrieval are stubs that raise
    NotImplementedError.
    """
    def __init__(self, server_url):
        # Strip surrounding '/' so endpoint paths join with one slash.
        self.server = server_url.strip('/')
        self.challenges = []  # Challenge objects fetched from the node
        self.heartbeat = None  # placeholder until a heartbeat is set up
    def connect(self, url):
        # Stub: not implemented yet.
        raise NotImplementedError
    def store_path(self, path):
        # Stub: not implemented yet.
        raise NotImplementedError
    def get_chunk(self, hash):
        # Stub: not implemented yet. NOTE(review): shadows builtin `hash`.
        raise NotImplementedError
    def challenge(self, hash, challenge):
        # Stub: not implemented yet.
        raise NotImplementedError
    def answer(self, hash, hash_answer):
        # Stub: not implemented yet.
        raise NotImplementedError
    def _enc_fname(self, filename):
        # URL-encode just the basename of `filename`.
        return urlify(os.path.split(filename)[1])
    def get_challenges(self, filename):
        """Fetch challenges for `filename` and append them to self.challenges."""
        enc_fname = urlify(os.path.split(filename)[1])
        url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
        resp = requests.get(url)
        try:
            resp.raise_for_status()
        except Exception as e:
            # NOTE(review): `e.message` exists only on Python 2 exceptions;
            # under Python 3 this line would raise AttributeError.
            raise DownstreamError("Error connecting to downstream"
                                  "-node:", e.message)
        _json = resp.json()
        for challenge in _json['challenges']:
            chal = Challenge(challenge.get('block'), challenge.get('seed'))
            self.challenges.append(chal)
    def answer_challenge(self, filename):
        # Stub: only the filename-encoding step exists so far.
        enc_fname = self._enc_fname(filename)
        raise NotImplementedError
    def random_challenge(self):
        # NOTE(review): random.choice's result is discarded; a `return`
        # is probably intended here.
        random.choice(self.challenges)
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
def __init__(self, server_url):
self.server = server_url.strip('/')
self.challenges = []
def connect(self, url):
raise NotImplementedError
def store_path(self, path):
raise NotImplementedError
def get_chunk(self, hash):
raise NotImplementedError
def challenge(self, hash, challenge):
raise NotImplementedError
def answer(self, hash, hash_answer):
raise NotImplementedError
def _enc_fname(self, filename):
return urlify(os.path.split(filename)[1])
def get_challenges(self, filename):
enc_fname = urlify(os.path.split(filename)[1])
url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
resp = requests.get(url)
try:
resp.raise_for_status()
except Exception as e:
raise DownstreamError("Error connecting to downstream"
"-node:", e.message)
_json = resp.json()
for challenge in _json['challenges']:
chal = Challenge(challenge.get('block'), challenge.get('seed'))
self.challenges.append(chal)
def answer_challenge(self, filename):
enc_fname = self._enc_fname(filename)
raise NotImplementedError
def random_challenge(self):
random.choice(self.challenges)<commit_msg>Set heartbeat attrib in init<commit_after> | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
    """Client for a downstream node's HTTP API.

    All operations except challenge retrieval are stubs that raise
    NotImplementedError.
    """
    def __init__(self, server_url):
        # Strip surrounding '/' so endpoint paths join with one slash.
        self.server = server_url.strip('/')
        self.challenges = []  # Challenge objects fetched from the node
        self.heartbeat = None  # placeholder until a heartbeat is set up
    def connect(self, url):
        # Stub: not implemented yet.
        raise NotImplementedError
    def store_path(self, path):
        # Stub: not implemented yet.
        raise NotImplementedError
    def get_chunk(self, hash):
        # Stub: not implemented yet. NOTE(review): shadows builtin `hash`.
        raise NotImplementedError
    def challenge(self, hash, challenge):
        # Stub: not implemented yet.
        raise NotImplementedError
    def answer(self, hash, hash_answer):
        # Stub: not implemented yet.
        raise NotImplementedError
    def _enc_fname(self, filename):
        # URL-encode just the basename of `filename`.
        return urlify(os.path.split(filename)[1])
    def get_challenges(self, filename):
        """Fetch challenges for `filename` and append them to self.challenges."""
        enc_fname = urlify(os.path.split(filename)[1])
        url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
        resp = requests.get(url)
        try:
            resp.raise_for_status()
        except Exception as e:
            # NOTE(review): `e.message` exists only on Python 2 exceptions;
            # under Python 3 this line would raise AttributeError.
            raise DownstreamError("Error connecting to downstream"
                                  "-node:", e.message)
        _json = resp.json()
        for challenge in _json['challenges']:
            chal = Challenge(challenge.get('block'), challenge.get('seed'))
            self.challenges.append(chal)
    def answer_challenge(self, filename):
        # Stub: only the filename-encoding step exists so far.
        enc_fname = self._enc_fname(filename)
        raise NotImplementedError
    def random_challenge(self):
        # NOTE(review): random.choice's result is discarded; a `return`
        # is probably intended here.
        random.choice(self.challenges)
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
def __init__(self, server_url):
self.server = server_url.strip('/')
self.challenges = []
def connect(self, url):
raise NotImplementedError
def store_path(self, path):
raise NotImplementedError
def get_chunk(self, hash):
raise NotImplementedError
def challenge(self, hash, challenge):
raise NotImplementedError
def answer(self, hash, hash_answer):
raise NotImplementedError
def _enc_fname(self, filename):
return urlify(os.path.split(filename)[1])
def get_challenges(self, filename):
enc_fname = urlify(os.path.split(filename)[1])
url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
resp = requests.get(url)
try:
resp.raise_for_status()
except Exception as e:
raise DownstreamError("Error connecting to downstream"
"-node:", e.message)
_json = resp.json()
for challenge in _json['challenges']:
chal = Challenge(challenge.get('block'), challenge.get('seed'))
self.challenges.append(chal)
def answer_challenge(self, filename):
enc_fname = self._enc_fname(filename)
raise NotImplementedError
def random_challenge(self):
random.choice(self.challenges)Set heartbeat attrib in init#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
    """Client for a downstream node's HTTP API.

    All operations except challenge retrieval are stubs that raise
    NotImplementedError.
    """
    def __init__(self, server_url):
        # Strip surrounding '/' so endpoint paths join with one slash.
        self.server = server_url.strip('/')
        self.challenges = []  # Challenge objects fetched from the node
        self.heartbeat = None  # placeholder until a heartbeat is set up
    def connect(self, url):
        # Stub: not implemented yet.
        raise NotImplementedError
    def store_path(self, path):
        # Stub: not implemented yet.
        raise NotImplementedError
    def get_chunk(self, hash):
        # Stub: not implemented yet. NOTE(review): shadows builtin `hash`.
        raise NotImplementedError
    def challenge(self, hash, challenge):
        # Stub: not implemented yet.
        raise NotImplementedError
    def answer(self, hash, hash_answer):
        # Stub: not implemented yet.
        raise NotImplementedError
    def _enc_fname(self, filename):
        # URL-encode just the basename of `filename`.
        return urlify(os.path.split(filename)[1])
    def get_challenges(self, filename):
        """Fetch challenges for `filename` and append them to self.challenges."""
        enc_fname = urlify(os.path.split(filename)[1])
        url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
        resp = requests.get(url)
        try:
            resp.raise_for_status()
        except Exception as e:
            # NOTE(review): `e.message` exists only on Python 2 exceptions;
            # under Python 3 this line would raise AttributeError.
            raise DownstreamError("Error connecting to downstream"
                                  "-node:", e.message)
        _json = resp.json()
        for challenge in _json['challenges']:
            chal = Challenge(challenge.get('block'), challenge.get('seed'))
            self.challenges.append(chal)
    def answer_challenge(self, filename):
        # Stub: only the filename-encoding step exists so far.
        enc_fname = self._enc_fname(filename)
        raise NotImplementedError
    def random_challenge(self):
        # NOTE(review): random.choice's result is discarded; a `return`
        # is probably intended here.
        random.choice(self.challenges)
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
def __init__(self, server_url):
self.server = server_url.strip('/')
self.challenges = []
def connect(self, url):
raise NotImplementedError
def store_path(self, path):
raise NotImplementedError
def get_chunk(self, hash):
raise NotImplementedError
def challenge(self, hash, challenge):
raise NotImplementedError
def answer(self, hash, hash_answer):
raise NotImplementedError
def _enc_fname(self, filename):
return urlify(os.path.split(filename)[1])
def get_challenges(self, filename):
enc_fname = urlify(os.path.split(filename)[1])
url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
resp = requests.get(url)
try:
resp.raise_for_status()
except Exception as e:
raise DownstreamError("Error connecting to downstream"
"-node:", e.message)
_json = resp.json()
for challenge in _json['challenges']:
chal = Challenge(challenge.get('block'), challenge.get('seed'))
self.challenges.append(chal)
def answer_challenge(self, filename):
enc_fname = self._enc_fname(filename)
raise NotImplementedError
def random_challenge(self):
random.choice(self.challenges)<commit_msg>Set heartbeat attrib in init<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import random
import hashlib
import requests
from heartbeat import Challenge, Heartbeat
from .utils import urlify
from .exc import DownstreamError
class DownstreamClient(object):
    """Client for a downstream node's HTTP API.

    All operations except challenge retrieval are stubs that raise
    NotImplementedError.
    """
    def __init__(self, server_url):
        # Strip surrounding '/' so endpoint paths join with one slash.
        self.server = server_url.strip('/')
        self.challenges = []  # Challenge objects fetched from the node
        self.heartbeat = None  # placeholder until a heartbeat is set up
    def connect(self, url):
        # Stub: not implemented yet.
        raise NotImplementedError
    def store_path(self, path):
        # Stub: not implemented yet.
        raise NotImplementedError
    def get_chunk(self, hash):
        # Stub: not implemented yet. NOTE(review): shadows builtin `hash`.
        raise NotImplementedError
    def challenge(self, hash, challenge):
        # Stub: not implemented yet.
        raise NotImplementedError
    def answer(self, hash, hash_answer):
        # Stub: not implemented yet.
        raise NotImplementedError
    def _enc_fname(self, filename):
        # URL-encode just the basename of `filename`.
        return urlify(os.path.split(filename)[1])
    def get_challenges(self, filename):
        """Fetch challenges for `filename` and append them to self.challenges."""
        enc_fname = urlify(os.path.split(filename)[1])
        url = '%s/api/downstream/challenge/%s' % (self.server, enc_fname)
        resp = requests.get(url)
        try:
            resp.raise_for_status()
        except Exception as e:
            # NOTE(review): `e.message` exists only on Python 2 exceptions;
            # under Python 3 this line would raise AttributeError.
            raise DownstreamError("Error connecting to downstream"
                                  "-node:", e.message)
        _json = resp.json()
        for challenge in _json['challenges']:
            chal = Challenge(challenge.get('block'), challenge.get('seed'))
            self.challenges.append(chal)
    def answer_challenge(self, filename):
        # Stub: only the filename-encoding step exists so far.
        enc_fname = self._enc_fname(filename)
        raise NotImplementedError
    def random_challenge(self):
        # NOTE(review): random.choice's result is discarded; a `return`
        # is probably intended here.
        random.choice(self.challenges)
fa9577f875c999ea876c99e30614051f7ceba129 | authentication_app/models.py | authentication_app/models.py | from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # NOTE(review): AccountManager is not defined or imported in this
    # version of the file.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
| from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : AccountManager
@desc : AccountManager model. The AccountManager is responsible to manage
the creation of users and superusers.
'''
class AccountManager(BaseUserManager):
    """Manager providing account-creation helpers for the Account model."""

    def create_user(self, email, password=None, **kwargs):
        """Create and persist a regular (non-admin) account.

        Args:
            email: login e-mail address; required.
            password: optional raw password, stored via set_password.
            **kwargs: must include `username`; any additional keyword
                arguments (e.g. first_name, last_name) are forwarded to
                the model constructor.

        Returns:
            The saved account instance.

        Raises:
            ValueError: if `email` or `username` is missing.
        """
        if not email:
            # Fixed typo in the original message ("adress").
            raise ValueError('Users must have a valid email address.')
        username = kwargs.pop('username', None)
        if not username:
            raise ValueError('Users must have a valid username.')
        account = self.model(
            email=self.normalize_email(email),
            username=username,
            # Forward remaining fields instead of silently dropping them.
            **kwargs
        )
        account.set_password(password)
        account.save()
        return account

    def create_superuser(self, email, password, **kwargs):
        """Create a regular account, then promote it to admin."""
        account = self.create_user(email, password, **kwargs)
        account.is_admin = True
        account.save()
        return account
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Custom manager that knows how to create users and superusers.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
| Add the account manager that support the creation of accounts. | Add the account manager that support the creation of accounts.
| Python | mit | mvpgomes/shopit-app,mvpgomes/shopit-app,mvpgomes/shopit-app,mvpgomes/shopit-app | from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # NOTE(review): AccountManager is not defined or imported in this
    # version of the file.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
Add the account manager that support the creation of accounts. | from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : AccountManager
@desc : AccountManager model. The AccountManager is responsible to manage
the creation of users and superusers.
'''
class AccountManager(BaseUserManager):
    """Manager providing account-creation helpers for the Account model."""
    def create_user(self, email, password=None, **kwargs):
        """Create and persist a regular account.

        Requires a non-empty `email` and a `username` keyword argument;
        raises ValueError otherwise. Keyword arguments other than
        `username` are currently ignored.
        """
        if not email:
            # NOTE(review): typo "adress" in the error message.
            raise ValueError('Users must have a valid email adress.')
        if not kwargs.get('username'):
            raise ValueError('Users must have a valid username.')
        account = self.model(
            email = self.normalize_email(email), username= kwargs.get('username')
        )
        # Store the password via Django's set_password helper.
        account.set_password(password)
        account.save()
        return account
    def create_superuser(self, email, password, **kwargs):
        """Create a regular account, then promote it to admin."""
        account = self.create_user(email, password, **kwargs)
        account.is_admin = True
        account.save()
        return account
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Custom manager that knows how to create users and superusers.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
| <commit_before>from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # NOTE(review): AccountManager is not defined or imported in this
    # version of the file.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
<commit_msg>Add the account manager that support the creation of accounts.<commit_after> | from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : AccountManager
@desc : AccountManager model. The AccountManager is responsible to manage
the creation of users and superusers.
'''
class AccountManager(BaseUserManager):
    """Manager providing account-creation helpers for the Account model."""
    def create_user(self, email, password=None, **kwargs):
        """Create and persist a regular account.

        Requires a non-empty `email` and a `username` keyword argument;
        raises ValueError otherwise. Keyword arguments other than
        `username` are currently ignored.
        """
        if not email:
            # NOTE(review): typo "adress" in the error message.
            raise ValueError('Users must have a valid email adress.')
        if not kwargs.get('username'):
            raise ValueError('Users must have a valid username.')
        account = self.model(
            email = self.normalize_email(email), username= kwargs.get('username')
        )
        # Store the password via Django's set_password helper.
        account.set_password(password)
        account.save()
        return account
    def create_superuser(self, email, password, **kwargs):
        """Create a regular account, then promote it to admin."""
        account = self.create_user(email, password, **kwargs)
        account.is_admin = True
        account.save()
        return account
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Custom manager that knows how to create users and superusers.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
| from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # NOTE(review): AccountManager is not defined or imported in this
    # version of the file.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
Add the account manager that support the creation of accounts.from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : AccountManager
@desc : AccountManager model. The AccountManager is responsible to manage
the creation of users and superusers.
'''
class AccountManager(BaseUserManager):
    """Manager providing account-creation helpers for the Account model."""
    def create_user(self, email, password=None, **kwargs):
        """Create and persist a regular account.

        Requires a non-empty `email` and a `username` keyword argument;
        raises ValueError otherwise. Keyword arguments other than
        `username` are currently ignored.
        """
        if not email:
            # NOTE(review): typo "adress" in the error message.
            raise ValueError('Users must have a valid email adress.')
        if not kwargs.get('username'):
            raise ValueError('Users must have a valid username.')
        account = self.model(
            email = self.normalize_email(email), username= kwargs.get('username')
        )
        # Store the password via Django's set_password helper.
        account.set_password(password)
        account.save()
        return account
    def create_superuser(self, email, password, **kwargs):
        """Create a regular account, then promote it to admin."""
        account = self.create_user(email, password, **kwargs)
        account.is_admin = True
        account.save()
        return account
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Custom manager that knows how to create users and superusers.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
| <commit_before>from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # NOTE(review): AccountManager is not defined or imported in this
    # version of the file.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
<commit_msg>Add the account manager that support the creation of accounts.<commit_after>from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
'''
@name : AccountManager
@desc : AccountManager model. The AccountManager is responsible to manage
the creation of users and superusers.
'''
class AccountManager(BaseUserManager):
    """Knows how to create regular accounts and admin accounts."""

    def create_user(self, email, password=None, **kwargs):
        """Validate input, then build, persist and return a new account."""
        if not email:
            raise ValueError('Users must have a valid email adress.')
        username = kwargs.get('username')
        if not username:
            raise ValueError('Users must have a valid username.')
        new_account = self.model(
            email=self.normalize_email(email),
            username=username,
        )
        new_account.set_password(password)
        new_account.save()
        return new_account

    def create_superuser(self, email, password, **kwargs):
        """Create an ordinary account and flag it as admin."""
        superuser = self.create_user(email, password, **kwargs)
        superuser.is_admin = True
        superuser.save()
        return superuser
'''
@name : Account
@desc : Account model. This model is generic to represent a user that has an
account in the ShopIT application. This user can be the store manager or the
mobile app user.
'''
class Account(AbstractBaseUser):
    """ShopIT user account authenticated by e-mail address.

    Represents both store managers and mobile-app users; `is_admin`
    marks administrative accounts.
    """
    # Login identifier (see USERNAME_FIELD); must be unique.
    email = models.EmailField(unique=True)
    # Public handle, also unique.
    username = models.CharField(max_length=50, unique=True)
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    is_admin = models.BooleanField(default=False)
    # Timestamps maintained automatically by Django on create/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # Custom manager that knows how to create users and superusers.
    objects = AccountManager()
    # Authenticate with the e-mail address; username is still required.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __unicode__(self):
        # Python 2 string representation.
        return self.email
    def get_full_name(self):
        # "First Last" display form.
        return ' '.join([self.first_name, self.last_name])
    def get_short_name(self):
        return self.first_name
|
332ea322d55b2d0410db172616fe51ccd0de050d | create_coverage_database.py | create_coverage_database.py | #!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
    # Create the coveragestore keyspace and sync its model tables.
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
    parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
    # type=int so a command-line value matches the int default instead of
    # arriving as a string.
    parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", type=int, default=3)
    args = parser.parse_args()

    if args.username:
        # Prompt for the password so it never appears on the command
        # line or in shell history.
        password = getpass.getpass()
        auth_provider = PlainTextAuthProvider(username=args.username, password=password)
        cluster = Cluster([args.address], auth_provider=auth_provider)
    else:
        cluster = Cluster([args.address])

    # Bug fix: Cluster.connect() takes no row_factory keyword; the
    # factory must be assigned on the Session after it is created.
    session = cluster.connect()
    session.row_factory = query.dict_factory

    connection.set_session(session)
    create_keyspace_simple("coveragestore", None, args.replication_factor)
    sync_table(SampleCoverage)
    sync_table(AmpliconCoverage)
| #!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
    # Create the coveragestore keyspace and sync its model tables.
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
    parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
    # NOTE(review): without type=int, a value given on the command line
    # arrives as a string while the default is an int.
    parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", default=3)
    args = parser.parse_args()
    if args.username:
        # Prompt for the password so it is not exposed on the command line.
        password = getpass.getpass()
        auth_provider = PlainTextAuthProvider(username=args.username, password=password)
        cluster = Cluster([args.address], auth_provider=auth_provider)
        session = cluster.connect()
        # Return rows as dicts; set on the session after connecting.
        session.row_factory=query.dict_factory
        # connection.setup([args.address], None, auth_provider=auth_provider)
    else:
        cluster = Cluster([args.address])
        session = cluster.connect()
        session.row_factory=query.dict_factory
        # connection.setup([args.address])
    # Register the session with cqlengine, then create the schema.
    connection.set_session(session)
    create_keyspace_simple("coveragestore", None, args.replication_factor)
    sync_table(SampleCoverage)
    sync_table(AmpliconCoverage)
| Move setting of row factory to after session object is created | Move setting of row factory to after session object is created
| Python | mit | GastonLab/ddb-datastore,dgaston/ddb-variantstore,dgaston/ddb-datastore,dgaston/ddbio-variantstore | #!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
    # Create the coveragestore keyspace and sync its model tables.
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
    parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
    # type=int so a command-line value matches the int default instead of
    # arriving as a string.
    parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", type=int, default=3)
    args = parser.parse_args()

    if args.username:
        # Prompt for the password so it never appears on the command
        # line or in shell history.
        password = getpass.getpass()
        auth_provider = PlainTextAuthProvider(username=args.username, password=password)
        cluster = Cluster([args.address], auth_provider=auth_provider)
    else:
        cluster = Cluster([args.address])

    # Bug fix: Cluster.connect() takes no row_factory keyword; the
    # factory must be assigned on the Session after it is created.
    session = cluster.connect()
    session.row_factory = query.dict_factory

    connection.set_session(session)
    create_keyspace_simple("coveragestore", None, args.replication_factor)
    sync_table(SampleCoverage)
    sync_table(AmpliconCoverage)
Move setting of row factory to after session object is created | #!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
    # Create the coveragestore keyspace and sync its model tables.
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
    parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
    # NOTE(review): without type=int, a value given on the command line
    # arrives as a string while the default is an int.
    parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", default=3)
    args = parser.parse_args()
    if args.username:
        # Prompt for the password so it is not exposed on the command line.
        password = getpass.getpass()
        auth_provider = PlainTextAuthProvider(username=args.username, password=password)
        cluster = Cluster([args.address], auth_provider=auth_provider)
        session = cluster.connect()
        # Return rows as dicts; set on the session after connecting.
        session.row_factory=query.dict_factory
        # connection.setup([args.address], None, auth_provider=auth_provider)
    else:
        cluster = Cluster([args.address])
        session = cluster.connect()
        session.row_factory=query.dict_factory
        # connection.setup([args.address])
    # Register the session with cqlengine, then create the schema.
    connection.set_session(session)
    create_keyspace_simple("coveragestore", None, args.replication_factor)
    sync_table(SampleCoverage)
    sync_table(AmpliconCoverage)
| <commit_before>#!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", default=3)
args = parser.parse_args()
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
cluster = Cluster([args.address], auth_provider=auth_provider)
session = cluster.connect(row_factory=query.dict_factory)
# connection.setup([args.address], None, auth_provider=auth_provider)
else:
cluster = Cluster([args.address])
session = cluster.connect()
# connection.setup([args.address])
connection.set_session(session)
create_keyspace_simple("coveragestore", None, args.replication_factor)
sync_table(SampleCoverage)
sync_table(AmpliconCoverage)
<commit_msg>Move setting of row factory to after session object is created<commit_after> | #!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", default=3)
args = parser.parse_args()
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
cluster = Cluster([args.address], auth_provider=auth_provider)
session = cluster.connect()
session.row_factory=query.dict_factory
# connection.setup([args.address], None, auth_provider=auth_provider)
else:
cluster = Cluster([args.address])
session = cluster.connect()
session.row_factory=query.dict_factory
# connection.setup([args.address])
connection.set_session(session)
create_keyspace_simple("coveragestore", None, args.replication_factor)
sync_table(SampleCoverage)
sync_table(AmpliconCoverage)
| #!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", default=3)
args = parser.parse_args()
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
cluster = Cluster([args.address], auth_provider=auth_provider)
session = cluster.connect(row_factory=query.dict_factory)
# connection.setup([args.address], None, auth_provider=auth_provider)
else:
cluster = Cluster([args.address])
session = cluster.connect()
# connection.setup([args.address])
connection.set_session(session)
create_keyspace_simple("coveragestore", None, args.replication_factor)
sync_table(SampleCoverage)
sync_table(AmpliconCoverage)
Move setting of row factory to after session object is created#!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", default=3)
args = parser.parse_args()
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
cluster = Cluster([args.address], auth_provider=auth_provider)
session = cluster.connect()
session.row_factory=query.dict_factory
# connection.setup([args.address], None, auth_provider=auth_provider)
else:
cluster = Cluster([args.address])
session = cluster.connect()
session.row_factory=query.dict_factory
# connection.setup([args.address])
connection.set_session(session)
create_keyspace_simple("coveragestore", None, args.replication_factor)
sync_table(SampleCoverage)
sync_table(AmpliconCoverage)
| <commit_before>#!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", default=3)
args = parser.parse_args()
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
cluster = Cluster([args.address], auth_provider=auth_provider)
session = cluster.connect(row_factory=query.dict_factory)
# connection.setup([args.address], None, auth_provider=auth_provider)
else:
cluster = Cluster([args.address])
session = cluster.connect()
# connection.setup([args.address])
connection.set_session(session)
create_keyspace_simple("coveragestore", None, args.replication_factor)
sync_table(SampleCoverage)
sync_table(AmpliconCoverage)
<commit_msg>Move setting of row factory to after session object is created<commit_after>#!/usr/bin/env python
import argparse
import getpass
from cassandra import query
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import create_keyspace_simple
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
from coveragestore import SampleCoverage
from coveragestore import AmpliconCoverage
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
parser.add_argument('-r', '--replication_factor', help="Cassandra replication factor", default=3)
args = parser.parse_args()
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
cluster = Cluster([args.address], auth_provider=auth_provider)
session = cluster.connect()
session.row_factory=query.dict_factory
# connection.setup([args.address], None, auth_provider=auth_provider)
else:
cluster = Cluster([args.address])
session = cluster.connect()
session.row_factory=query.dict_factory
# connection.setup([args.address])
connection.set_session(session)
create_keyspace_simple("coveragestore", None, args.replication_factor)
sync_table(SampleCoverage)
sync_table(AmpliconCoverage)
|
1fe4a129ba96f14dc91832754de00271a29f48ca | tests/tools/test_foodcritic.py | tests/tools/test_foodcritic.py | from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from unittest import TestCase
from nose.tools import eq_
class TestFoodcritic(TestCase):
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
| from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from lintreview.utils import in_path
from unittest import TestCase, skipIf
from nose.tools import eq_
critic_missing = not(in_path('foodcritic'))
class TestFoodcritic(TestCase):
needs_critic = skipIf(critic_missing, 'Missing foodcritic, cannot run')
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
@needs_critic
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
@needs_critic
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
| Fix tests failing when foodcritic is not installed. | Fix tests failing when foodcritic is not installed.
Tool tests should skip, not fail.
| Python | mit | markstory/lint-review,adrianmoisey/lint-review,zoidbergwill/lint-review,zoidbergwill/lint-review,markstory/lint-review,zoidbergwill/lint-review,adrianmoisey/lint-review,markstory/lint-review | from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from unittest import TestCase
from nose.tools import eq_
class TestFoodcritic(TestCase):
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
Fix tests failing when foodcritic is not installed.
Tool tests should skip, not fail. | from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from lintreview.utils import in_path
from unittest import TestCase, skipIf
from nose.tools import eq_
critic_missing = not(in_path('foodcritic'))
class TestFoodcritic(TestCase):
needs_critic = skipIf(critic_missing, 'Missing foodcritic, cannot run')
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
@needs_critic
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
@needs_critic
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
| <commit_before>from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from unittest import TestCase
from nose.tools import eq_
class TestFoodcritic(TestCase):
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
<commit_msg>Fix tests failing when foodcritic is not installed.
Tool tests should skip, not fail.<commit_after> | from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from lintreview.utils import in_path
from unittest import TestCase, skipIf
from nose.tools import eq_
critic_missing = not(in_path('foodcritic'))
class TestFoodcritic(TestCase):
needs_critic = skipIf(critic_missing, 'Missing foodcritic, cannot run')
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
@needs_critic
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
@needs_critic
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
| from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from unittest import TestCase
from nose.tools import eq_
class TestFoodcritic(TestCase):
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
Fix tests failing when foodcritic is not installed.
Tool tests should skip, not fail.from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from lintreview.utils import in_path
from unittest import TestCase, skipIf
from nose.tools import eq_
critic_missing = not(in_path('foodcritic'))
class TestFoodcritic(TestCase):
needs_critic = skipIf(critic_missing, 'Missing foodcritic, cannot run')
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
@needs_critic
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
@needs_critic
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
| <commit_before>from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from unittest import TestCase
from nose.tools import eq_
class TestFoodcritic(TestCase):
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
<commit_msg>Fix tests failing when foodcritic is not installed.
Tool tests should skip, not fail.<commit_after>from lintreview.review import Comment
from lintreview.review import Problems
from lintreview.tools.foodcritic import Foodcritic
from lintreview.utils import in_path
from unittest import TestCase, skipIf
from nose.tools import eq_
critic_missing = not(in_path('foodcritic'))
class TestFoodcritic(TestCase):
needs_critic = skipIf(critic_missing, 'Missing foodcritic, cannot run')
fixtures = [
'tests/fixtures/foodcritic/noerrors',
'tests/fixtures/foodcritic/errors',
]
def setUp(self):
self.problems = Problems()
@needs_critic
def test_process_cookbook_pass(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[0])
self.tool.process_files(None)
eq_([], self.problems.all())
@needs_critic
def test_process_cookbook_fail(self):
self.tool = Foodcritic(self.problems, None, self.fixtures[1])
self.tool.process_files(None)
problems = self.problems.all()
eq_(5, len(problems))
expected = Comment(
'tests/fixtures/foodcritic/errors/recipes/apache2.rb', 1, 1,
'FC007: Ensure recipe dependencies are reflected in cookbook '
'metadata')
eq_(expected, problems[1])
|
7f2df3979458df73e4e3f0a9fdcb16905960de81 | _config.py | _config.py | # Amazon S3 Settings
AWS_KEY = 'REQUIRED'
AWS_SECRET_KEY = 'REQUIRED'
AWS_BUCKET = 'REQUIRED'
AWS_DIRECTORY = '' # Leave blank *not false* unless project not at base URL
# i.e. example.com/apps/ instead of example.com/
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
| # Frozen Flask
FREEZER_DEFAULT_MIMETYPE = 'text/html'
FREEZER_IGNORE_MIMETYPE_WARNINGS = True
# Amazon S3 Settings
AWS_KEY = ''
AWS_SECRET_KEY = ''
AWS_BUCKET = ''
AWS_DIRECTORY = '' # Use if S3 bucket is not root
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
if AWS_DIRECTORY:
BASE_URL = 'http://' + AWS_BUCKET + '/' + AWS_DIRECTORY
FREEZER_BASE_URL = BASE_URL
else:
BASE_URL = 'http://' + AWS_BUCKET
| Update settings for new configuration | Update settings for new configuration
| Python | apache-2.0 | vprnet/EOTS-iframe-widget,vprnet/app-template,vprnet/live-from-the-fort,vprnet/timeline-dcf-systemic-failure,vprnet/app-template,vprnet/old-app-template,vprnet/app-template,vprnet/interactive-transcript-gov-peter-shumlins-third-inaugural-address,vprnet/soundcloud-podcast,vprnet/interactive-transcript-gov-peter-shumlins-2015-budget-speech,vprnet/interactive-transcript-gov-peter-shumlins-2015-budget-speech,vprnet/old-app-template,vprnet/EOTS-iframe-widget,vprnet/EOTS-iframe-widget,vprnet/interactive-transcript-gov-peter-shumlins-third-inaugural-address,vprnet/live-from-the-fort,vprnet/timeline-dcf-systemic-failure,vprnet/google-s3-json,vprnet/live-from-the-fort | # Amazon S3 Settings
AWS_KEY = 'REQUIRED'
AWS_SECRET_KEY = 'REQUIRED'
AWS_BUCKET = 'REQUIRED'
AWS_DIRECTORY = '' # Leave blank *not false* unless project not at base URL
# i.e. example.com/apps/ instead of example.com/
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
Update settings for new configuration | # Frozen Flask
FREEZER_DEFAULT_MIMETYPE = 'text/html'
FREEZER_IGNORE_MIMETYPE_WARNINGS = True
# Amazon S3 Settings
AWS_KEY = ''
AWS_SECRET_KEY = ''
AWS_BUCKET = ''
AWS_DIRECTORY = '' # Use if S3 bucket is not root
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
if AWS_DIRECTORY:
BASE_URL = 'http://' + AWS_BUCKET + '/' + AWS_DIRECTORY
FREEZER_BASE_URL = BASE_URL
else:
BASE_URL = 'http://' + AWS_BUCKET
| <commit_before># Amazon S3 Settings
AWS_KEY = 'REQUIRED'
AWS_SECRET_KEY = 'REQUIRED'
AWS_BUCKET = 'REQUIRED'
AWS_DIRECTORY = '' # Leave blank *not false* unless project not at base URL
# i.e. example.com/apps/ instead of example.com/
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
<commit_msg>Update settings for new configuration<commit_after> | # Frozen Flask
FREEZER_DEFAULT_MIMETYPE = 'text/html'
FREEZER_IGNORE_MIMETYPE_WARNINGS = True
# Amazon S3 Settings
AWS_KEY = ''
AWS_SECRET_KEY = ''
AWS_BUCKET = ''
AWS_DIRECTORY = '' # Use if S3 bucket is not root
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
if AWS_DIRECTORY:
BASE_URL = 'http://' + AWS_BUCKET + '/' + AWS_DIRECTORY
FREEZER_BASE_URL = BASE_URL
else:
BASE_URL = 'http://' + AWS_BUCKET
| # Amazon S3 Settings
AWS_KEY = 'REQUIRED'
AWS_SECRET_KEY = 'REQUIRED'
AWS_BUCKET = 'REQUIRED'
AWS_DIRECTORY = '' # Leave blank *not false* unless project not at base URL
# i.e. example.com/apps/ instead of example.com/
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
Update settings for new configuration# Frozen Flask
FREEZER_DEFAULT_MIMETYPE = 'text/html'
FREEZER_IGNORE_MIMETYPE_WARNINGS = True
# Amazon S3 Settings
AWS_KEY = ''
AWS_SECRET_KEY = ''
AWS_BUCKET = ''
AWS_DIRECTORY = '' # Use if S3 bucket is not root
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
if AWS_DIRECTORY:
BASE_URL = 'http://' + AWS_BUCKET + '/' + AWS_DIRECTORY
FREEZER_BASE_URL = BASE_URL
else:
BASE_URL = 'http://' + AWS_BUCKET
| <commit_before># Amazon S3 Settings
AWS_KEY = 'REQUIRED'
AWS_SECRET_KEY = 'REQUIRED'
AWS_BUCKET = 'REQUIRED'
AWS_DIRECTORY = '' # Leave blank *not false* unless project not at base URL
# i.e. example.com/apps/ instead of example.com/
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
<commit_msg>Update settings for new configuration<commit_after># Frozen Flask
FREEZER_DEFAULT_MIMETYPE = 'text/html'
FREEZER_IGNORE_MIMETYPE_WARNINGS = True
# Amazon S3 Settings
AWS_KEY = ''
AWS_SECRET_KEY = ''
AWS_BUCKET = ''
AWS_DIRECTORY = '' # Use if S3 bucket is not root
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.pyc', '.py', '.rb', '.md']
if AWS_DIRECTORY:
BASE_URL = 'http://' + AWS_BUCKET + '/' + AWS_DIRECTORY
FREEZER_BASE_URL = BASE_URL
else:
BASE_URL = 'http://' + AWS_BUCKET
|
6c1a285d58825942e51689e7370316151345ab1f | examples/tornado/auth_demo.py | examples/tornado/auth_demo.py | from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main])
| from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main], settings={'limits.buffer_size': 4 * 1024})
| Add the settings to the authdemo. | Add the settings to the authdemo. | Python | bsd-3-clause | ged/mongrel2,ged/mongrel2,ged/mongrel2,ged/mongrel2 | from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main])
Add the settings to the authdemo. | from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main], settings={'limits.buffer_size': 4 * 1024})
| <commit_before>from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main])
<commit_msg>Add the settings to the authdemo.<commit_after> | from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main], settings={'limits.buffer_size': 4 * 1024})
| from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main])
Add the settings to the authdemo.from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main], settings={'limits.buffer_size': 4 * 1024})
| <commit_before>from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main])
<commit_msg>Add the settings to the authdemo.<commit_after>from mongrel2.config import *
main = Server(
uuid="f400bf85-4538-4f7a-8908-67e313d515c2",
access_log="/logs/access.log",
error_log="/logs/error.log",
chroot="./",
default_host="localhost",
name="test",
pid_file="/run/mongrel2.pid",
port=6767,
hosts = [
Host(name="localhost",
routes={ r'/(.*)': Proxy(addr='127.0.0.1', port=8888) })
]
)
commit([main], settings={'limits.buffer_size': 4 * 1024})
|
0e8d4e6649b6a48ac7bd87746574119a5ce5fd1a | qiime_studio/api/v1.py | qiime_studio/api/v1.py | from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def plugins():
pm = PluginManager()
return jsonify(pm.plugins)
| from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
PLUGIN_MANAGER = PluginManager()
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def api_plugins():
plugin_list = list(PLUGIN_MANAGER.plugins.keys())
return jsonify({"names": plugin_list})
@v1.route('/workflows/<plugin_name>', methods=['GET'])
def api_workflows(plugin_name):
plugin = PLUGIN_MANAGER.plugins[plugin_name]
workflows_dict = {}
for key, value in plugin.workflows.items():
workflows_dict[key] = {}
workflows_dict[key]['info'] = "Produces: {}".format(list(value.signature.output_artifacts.values()))
return jsonify({"workflows": workflows_dict})
| Add plugin workflow fetching to API | Add plugin workflow fetching to API
| Python | bsd-3-clause | jakereps/qiime-studio,qiime2/qiime-studio,qiime2/qiime-studio-frontend,jakereps/qiime-studio,jakereps/qiime-studio,qiime2/qiime-studio-frontend,jakereps/qiime-studio-frontend,qiime2/qiime-studio,jakereps/qiime-studio-frontend,qiime2/qiime-studio | from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def plugins():
pm = PluginManager()
return jsonify(pm.plugins)
Add plugin workflow fetching to API | from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
PLUGIN_MANAGER = PluginManager()
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def api_plugins():
plugin_list = list(PLUGIN_MANAGER.plugins.keys())
return jsonify({"names": plugin_list})
@v1.route('/workflows/<plugin_name>', methods=['GET'])
def api_workflows(plugin_name):
plugin = PLUGIN_MANAGER.plugins[plugin_name]
workflows_dict = {}
for key, value in plugin.workflows.items():
workflows_dict[key] = {}
workflows_dict[key]['info'] = "Produces: {}".format(list(value.signature.output_artifacts.values()))
return jsonify({"workflows": workflows_dict})
| <commit_before>from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def plugins():
pm = PluginManager()
return jsonify(pm.plugins)
<commit_msg>Add plugin workflow fetching to API<commit_after> | from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
PLUGIN_MANAGER = PluginManager()
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def api_plugins():
plugin_list = list(PLUGIN_MANAGER.plugins.keys())
return jsonify({"names": plugin_list})
@v1.route('/workflows/<plugin_name>', methods=['GET'])
def api_workflows(plugin_name):
plugin = PLUGIN_MANAGER.plugins[plugin_name]
workflows_dict = {}
for key, value in plugin.workflows.items():
workflows_dict[key] = {}
workflows_dict[key]['info'] = "Produces: {}".format(list(value.signature.output_artifacts.values()))
return jsonify({"workflows": workflows_dict})
| from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def plugins():
pm = PluginManager()
return jsonify(pm.plugins)
Add plugin workflow fetching to APIfrom flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
PLUGIN_MANAGER = PluginManager()
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def api_plugins():
plugin_list = list(PLUGIN_MANAGER.plugins.keys())
return jsonify({"names": plugin_list})
@v1.route('/workflows/<plugin_name>', methods=['GET'])
def api_workflows(plugin_name):
plugin = PLUGIN_MANAGER.plugins[plugin_name]
workflows_dict = {}
for key, value in plugin.workflows.items():
workflows_dict[key] = {}
workflows_dict[key]['info'] = "Produces: {}".format(list(value.signature.output_artifacts.values()))
return jsonify({"workflows": workflows_dict})
| <commit_before>from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def plugins():
pm = PluginManager()
return jsonify(pm.plugins)
<commit_msg>Add plugin workflow fetching to API<commit_after>from flask import Blueprint, jsonify
from .security import validate_request_authentication
from qiime.sdk import PluginManager
PLUGIN_MANAGER = PluginManager()
v1 = Blueprint('v1', __name__)
v1.before_request(validate_request_authentication)
@v1.route('/', methods=['GET', 'POST'])
def root():
return jsonify(content="!")
@v1.route('/plugins', methods=['GET'])
def api_plugins():
plugin_list = list(PLUGIN_MANAGER.plugins.keys())
return jsonify({"names": plugin_list})
@v1.route('/workflows/<plugin_name>', methods=['GET'])
def api_workflows(plugin_name):
plugin = PLUGIN_MANAGER.plugins[plugin_name]
workflows_dict = {}
for key, value in plugin.workflows.items():
workflows_dict[key] = {}
workflows_dict[key]['info'] = "Produces: {}".format(list(value.signature.output_artifacts.values()))
return jsonify({"workflows": workflows_dict})
|
ccdb064c0523e9293dca13adefa13d155d372505 | spotifyconnect/sink.py | spotifyconnect/sink.py | from __future__ import unicode_literals
import spotifyconnect
class Sink(object):
    """Base class for audio sinks fed by the Spotify Connect player.

    Subclasses implement :meth:`_on_music_delivery` to consume raw audio
    frames and may override :meth:`_close` to release their resources.
    """

    def on(self):
        """Connect this sink to the session's music-delivery event.

        Turning the sink on is done automatically when it is
        instantiated, so this only needs to be called again after a
        previous :meth:`off`.
        """
        player = spotifyconnect._session_instance.player
        # Exactly one sink may consume audio at a time.
        assert player.num_listeners(
            spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
        player.on(
            spotifyconnect.PlayerEvent.MUSIC_DELIVERY,
            self._on_music_delivery)

    def off(self):
        """Disconnect the sink from the session events and close it."""
        player = spotifyconnect._session_instance.player
        player.off(
            spotifyconnect.PlayerEvent.MUSIC_DELIVERY,
            self._on_music_delivery)
        # After detaching there must be no listeners left.
        assert player.num_listeners(
            spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
        self._close()

    def _on_music_delivery(self, audio_format, frames, num_frames,
                           pending, session):
        # Called from an internal libspotify thread; must never block.
        raise NotImplementedError

    def _close(self):
        # Default: nothing to release; subclasses may override.
        pass
| from __future__ import unicode_literals
import spotifyconnect
# Public API of this module.
__all__ = [
    'Sink'
]


class Sink(object):
    """Base class for audio sinks driven by the Spotify Connect player.

    Subclasses implement :meth:`_on_music_delivery` to consume raw audio
    frames and may override :meth:`_close` to release resources.
    """

    def on(self):
        """Turn on the alsa_sink sink.

        This is done automatically when the sink is instantiated, so you'll
        only need to call this method if you ever call :meth:`off` and want to
        turn the sink back on.
        """
        # Exactly one sink may listen for audio at a time.
        assert spotifyconnect._session_instance.player.num_listeners(
            spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
        spotifyconnect._session_instance.player.on(
            spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)

    def off(self):
        """Turn off the alsa_sink sink.

        This disconnects the sink from the relevant session events.
        """
        spotifyconnect._session_instance.player.off(
            spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
        # After detaching there must be no listeners left.
        assert spotifyconnect._session_instance.player.num_listeners(
            spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
        self._close()

    def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
        # This method is called from an internal libspotify thread and must
        # not block in any way.
        raise NotImplementedError

    def _close(self):
        # Default: nothing to release; subclasses may override.
        pass
| Add Sink class to initial spotify-connect import | Add Sink class to initial spotify-connect import
| Python | apache-2.0 | chukysoria/pyspotify-connect,chukysoria/pyspotify-connect | from __future__ import unicode_literals
import spotifyconnect
class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
Add Sink class to initial spotify-connect import | from __future__ import unicode_literals
import spotifyconnect
__all__ = [
'Sink'
]
class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
| <commit_before>from __future__ import unicode_literals
import spotifyconnect
class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
<commit_msg>Add Sink class to initial spotify-connect import<commit_after> | from __future__ import unicode_literals
import spotifyconnect
__all__ = [
'Sink'
]
class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
| from __future__ import unicode_literals
import spotifyconnect
class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
Add Sink class to initial spotify-connect importfrom __future__ import unicode_literals
import spotifyconnect
__all__ = [
'Sink'
]
class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
| <commit_before>from __future__ import unicode_literals
import spotifyconnect
class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
<commit_msg>Add Sink class to initial spotify-connect import<commit_after>from __future__ import unicode_literals
import spotifyconnect
__all__ = [
'Sink'
]
class Sink(object):
def on(self):
"""Turn on the alsa_sink sink.
This is done automatically when the sink is instantiated, so you'll
only need to call this method if you ever call :meth:`off` and want to
turn the sink back on.
"""
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
spotifyconnect._session_instance.player.on(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
def off(self):
"""Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events.
"""
spotifyconnect._session_instance.player.off(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
assert spotifyconnect._session_instance.player.num_listeners(
spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
self._close()
def _on_music_delivery(self, audio_format, frames, num_frames, pending, session):
# This method is called from an internal libspotify thread and must
# not block in any way.
raise NotImplementedError
def _close(self):
pass
|
0a1d7a76407f834a40d8cb96312cf6a5d322c65c | datastage/web/user/models.py | datastage/web/user/models.py | import pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
    """Proxy over Django's built-in User that authenticates via PAM."""

    class Meta:
        # Proxy model: same database table as auth.User, only Python
        # behavior is overridden.
        proxy = True

    def check_password(self, raw_password):
        # Delegate password verification to the system PAM stack instead
        # of Django's hashed-password storage.
        return pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = User | import dpam.pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
    """Proxy over Django's built-in User that authenticates via PAM."""

    class Meta:
        # Proxy model: same database table as auth.User.
        proxy = True

    def check_password(self, raw_password):
        # Delegate password verification to the system PAM stack
        # (django-pam) instead of Django's hashed-password storage.
        return dpam.pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = User
| Use django_pams pam.authenticate, instead of some other pam module. | Use django_pams pam.authenticate, instead of some other pam module.
| Python | mit | dataflow/DataStage,dataflow/DataStage,dataflow/DataStage | import pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
class Meta:
proxy = True
def check_password(self, raw_password):
return pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = UserUse django_pams pam.authenticate, instead of some other pam module. | import dpam.pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
class Meta:
proxy = True
def check_password(self, raw_password):
return dpam.pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = User
| <commit_before>import pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
class Meta:
proxy = True
def check_password(self, raw_password):
return pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = User<commit_msg>Use django_pams pam.authenticate, instead of some other pam module.<commit_after> | import dpam.pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
class Meta:
proxy = True
def check_password(self, raw_password):
return dpam.pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = User
| import pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
class Meta:
proxy = True
def check_password(self, raw_password):
return pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = UserUse django_pams pam.authenticate, instead of some other pam module.import dpam.pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
class Meta:
proxy = True
def check_password(self, raw_password):
return dpam.pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = User
| <commit_before>import pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
class Meta:
proxy = True
def check_password(self, raw_password):
return pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = User<commit_msg>Use django_pams pam.authenticate, instead of some other pam module.<commit_after>import dpam.pam
from django.contrib.auth import models as auth_models
class User(auth_models.User):
class Meta:
proxy = True
def check_password(self, raw_password):
return dpam.pam.authenticate(self.username, raw_password)
# Monkey-patch User model
auth_models.User = User
|
1d73536020b577b8847f5ce3be1c8686ee324fe9 | murano/tests/unit/api/middleware/test_version_negotiation.py | murano/tests/unit/api/middleware/test_version_negotiation.py | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
    """Tests for the version-negotiation WSGI middleware."""

    def test_middleware_version_negotiation_default(self):
        # An un-versioned URL should fall back to the versions
        # controller rather than being routed to a specific API version.
        middleware_vn = version_negotiation.VersionNegotiationFilter(None)
        request = webob.Request.blank('/environments')
        result = middleware_vn.process_request(request)
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(result, versions.Controller)
| # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
    """Tests for the version-negotiation WSGI middleware."""

    def test_middleware_version_negotiation_default(self):
        # An un-versioned URL should fall back to the versions
        # controller rather than being routed to a specific API version.
        middleware_vn = version_negotiation.VersionNegotiationFilter(None)
        request = webob.Request.blank('/environments')
        result = middleware_vn.process_request(request)
        self.assertIsInstance(result, versions.Controller)
| Change assertTrue(isinstance()) by optimal assert | Change assertTrue(isinstance()) by optimal assert
Some of tests use different method of assertTrue(isinstance(A, B))
or assertEqual(type(A), B). The correct way is to use assertIsInstance(A, B)
provided by testtools
Change-Id: Id6ff634d7af3ee3b2eb4add770b0ba589bb08d1d
| Python | apache-2.0 | openstack/murano,openstack/murano | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
def test_middleware_version_negotiation_default(self):
middleware_vn = version_negotiation.VersionNegotiationFilter(None)
request = webob.Request.blank('/environments')
result = middleware_vn.process_request(request)
self.assertTrue(isinstance(result, versions.Controller))
Change assertTrue(isinstance()) by optimal assert
Some of tests use different method of assertTrue(isinstance(A, B))
or assertEqual(type(A), B). The correct way is to use assertIsInstance(A, B)
provided by testtools
Change-Id: Id6ff634d7af3ee3b2eb4add770b0ba589bb08d1d | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
def test_middleware_version_negotiation_default(self):
middleware_vn = version_negotiation.VersionNegotiationFilter(None)
request = webob.Request.blank('/environments')
result = middleware_vn.process_request(request)
self.assertIsInstance(result, versions.Controller)
| <commit_before># Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
def test_middleware_version_negotiation_default(self):
middleware_vn = version_negotiation.VersionNegotiationFilter(None)
request = webob.Request.blank('/environments')
result = middleware_vn.process_request(request)
self.assertTrue(isinstance(result, versions.Controller))
<commit_msg>Change assertTrue(isinstance()) by optimal assert
Some of tests use different method of assertTrue(isinstance(A, B))
or assertEqual(type(A), B). The correct way is to use assertIsInstance(A, B)
provided by testtools
Change-Id: Id6ff634d7af3ee3b2eb4add770b0ba589bb08d1d<commit_after> | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
def test_middleware_version_negotiation_default(self):
middleware_vn = version_negotiation.VersionNegotiationFilter(None)
request = webob.Request.blank('/environments')
result = middleware_vn.process_request(request)
self.assertIsInstance(result, versions.Controller)
| # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
def test_middleware_version_negotiation_default(self):
middleware_vn = version_negotiation.VersionNegotiationFilter(None)
request = webob.Request.blank('/environments')
result = middleware_vn.process_request(request)
self.assertTrue(isinstance(result, versions.Controller))
Change assertTrue(isinstance()) by optimal assert
Some of tests use different method of assertTrue(isinstance(A, B))
or assertEqual(type(A), B). The correct way is to use assertIsInstance(A, B)
provided by testtools
Change-Id: Id6ff634d7af3ee3b2eb4add770b0ba589bb08d1d# Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
def test_middleware_version_negotiation_default(self):
middleware_vn = version_negotiation.VersionNegotiationFilter(None)
request = webob.Request.blank('/environments')
result = middleware_vn.process_request(request)
self.assertIsInstance(result, versions.Controller)
| <commit_before># Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
def test_middleware_version_negotiation_default(self):
middleware_vn = version_negotiation.VersionNegotiationFilter(None)
request = webob.Request.blank('/environments')
result = middleware_vn.process_request(request)
self.assertTrue(isinstance(result, versions.Controller))
<commit_msg>Change assertTrue(isinstance()) by optimal assert
Some of tests use different method of assertTrue(isinstance(A, B))
or assertEqual(type(A), B). The correct way is to use assertIsInstance(A, B)
provided by testtools
Change-Id: Id6ff634d7af3ee3b2eb4add770b0ba589bb08d1d<commit_after># Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from murano.api import versions
from murano.api.middleware import version_negotiation
from murano.tests.unit import base
class MiddlewareVersionNegotiationTest(base.MuranoTestCase):
def test_middleware_version_negotiation_default(self):
middleware_vn = version_negotiation.VersionNegotiationFilter(None)
request = webob.Request.blank('/environments')
result = middleware_vn.process_request(request)
self.assertIsInstance(result, versions.Controller)
|
8ab5345f5c0c62606a28c608e45e08a89a96dddb | tests/install_tests.py | tests/install_tests.py | import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
output = subprocess.check_output(['node', '--version'])
self.assertEquals(output.strip(), 'v0.8.11')
| import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
proc = subprocess.Popen(['node', '--version'], stdout=subprocess.PIPE)
output = proc.stdout.read()
self.assertEquals(output.strip(), 'v0.8.11')
| Fix test to work with python 2.6 | Fix test to work with python 2.6
| Python | bsd-3-clause | elbaschid/virtual-node | import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
output = subprocess.check_output(['node', '--version'])
self.assertEquals(output.strip(), 'v0.8.11')
Fix test to work with python 2.6 | import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
proc = subprocess.Popen(['node', '--version'], stdout=subprocess.PIPE)
output = proc.stdout.read()
self.assertEquals(output.strip(), 'v0.8.11')
| <commit_before>import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
output = subprocess.check_output(['node', '--version'])
self.assertEquals(output.strip(), 'v0.8.11')
<commit_msg>Fix test to work with python 2.6<commit_after> | import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
proc = subprocess.Popen(['node', '--version'], stdout=subprocess.PIPE)
output = proc.stdout.read()
self.assertEquals(output.strip(), 'v0.8.11')
| import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
output = subprocess.check_output(['node', '--version'])
self.assertEquals(output.strip(), 'v0.8.11')
Fix test to work with python 2.6import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
proc = subprocess.Popen(['node', '--version'], stdout=subprocess.PIPE)
output = proc.stdout.read()
self.assertEquals(output.strip(), 'v0.8.11')
| <commit_before>import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
output = subprocess.check_output(['node', '--version'])
self.assertEquals(output.strip(), 'v0.8.11')
<commit_msg>Fix test to work with python 2.6<commit_after>import sys
import logging
import subprocess
sys.path.insert(0, '..')
from unittest2 import TestCase
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
class TestNode(TestCase):
def test_is_installed(self):
proc = subprocess.Popen(['node', '--version'], stdout=subprocess.PIPE)
output = proc.stdout.read()
self.assertEquals(output.strip(), 'v0.8.11')
|
9888a03368ad7b440cc43384024c71147aa647a3 | fireplace/cards/tgt/shaman.py | fireplace/cards/tgt/shaman.py | from ..utils import *
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
| from ..utils import *
##
# Hero Powers
# Lightning Jolt
class AT_050t:
play = Hit(TARGET, 2)
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
##
# Spells
# Elemental Destruction
class AT_051:
play = Hit(ALL_MINIONS, RandomNumber(4, 5))
# Ancestral Knowledge
class AT_053:
play = Draw(CONTROLLER) * 2
##
# Weapons
# Charged Hammer
class AT_050:
deathrattle = Summon(CONTROLLER, "AT_050t")
| Implement Shaman cards for The Grand Tournament | Implement Shaman cards for The Grand Tournament
| Python | agpl-3.0 | oftc-ftw/fireplace,smallnamespace/fireplace,amw2104/fireplace,jleclanche/fireplace,Ragowit/fireplace,beheh/fireplace,smallnamespace/fireplace,NightKev/fireplace,liujimj/fireplace,Meerkov/fireplace,liujimj/fireplace,Ragowit/fireplace,oftc-ftw/fireplace,amw2104/fireplace,Meerkov/fireplace | from ..utils import *
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
Implement Shaman cards for The Grand Tournament | from ..utils import *
##
# Hero Powers
# Lightning Jolt
class AT_050t:
play = Hit(TARGET, 2)
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
##
# Spells
# Elemental Destruction
class AT_051:
play = Hit(ALL_MINIONS, RandomNumber(4, 5))
# Ancestral Knowledge
class AT_053:
play = Draw(CONTROLLER) * 2
##
# Weapons
# Charged Hammer
class AT_050:
deathrattle = Summon(CONTROLLER, "AT_050t")
| <commit_before>from ..utils import *
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
<commit_msg>Implement Shaman cards for The Grand Tournament<commit_after> | from ..utils import *
##
# Hero Powers
# Lightning Jolt
class AT_050t:
play = Hit(TARGET, 2)
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
##
# Spells
# Elemental Destruction
class AT_051:
play = Hit(ALL_MINIONS, RandomNumber(4, 5))
# Ancestral Knowledge
class AT_053:
play = Draw(CONTROLLER) * 2
##
# Weapons
# Charged Hammer
class AT_050:
deathrattle = Summon(CONTROLLER, "AT_050t")
| from ..utils import *
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
Implement Shaman cards for The Grand Tournamentfrom ..utils import *
##
# Hero Powers
# Lightning Jolt
class AT_050t:
play = Hit(TARGET, 2)
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
##
# Spells
# Elemental Destruction
class AT_051:
play = Hit(ALL_MINIONS, RandomNumber(4, 5))
# Ancestral Knowledge
class AT_053:
play = Draw(CONTROLLER) * 2
##
# Weapons
# Charged Hammer
class AT_050:
deathrattle = Summon(CONTROLLER, "AT_050t")
| <commit_before>from ..utils import *
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
<commit_msg>Implement Shaman cards for The Grand Tournament<commit_after>from ..utils import *
##
# Hero Powers
# Lightning Jolt
class AT_050t:
play = Hit(TARGET, 2)
##
# Minions
# Tuskarr Totemic
class AT_046:
play = Summon(CONTROLLER, RandomTotem())
# Draenei Totemcarver
class AT_047:
play = Buff(SELF, "AT_047e") * Count(FRIENDLY_MINIONS + TOTEM)
# Thunder Bluff Valiant
class AT_049:
inspire = Buff(FRIENDLY_MINIONS + TOTEM, "AT_049e")
##
# Spells
# Elemental Destruction
class AT_051:
play = Hit(ALL_MINIONS, RandomNumber(4, 5))
# Ancestral Knowledge
class AT_053:
play = Draw(CONTROLLER) * 2
##
# Weapons
# Charged Hammer
class AT_050:
deathrattle = Summon(CONTROLLER, "AT_050t")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.