Dataset schema (column · type · range/values):

  id                int32           0 – 252k
  repo              string          lengths 7 – 55
  path              string          lengths 4 – 127
  func_name         string          lengths 1 – 88
  original_string   string          lengths 75 – 19.8k
  language          stringclasses   1 value
  code              string          lengths 75 – 19.8k
  code_tokens       list
  docstring         string          lengths 3 – 17.3k
  docstring_tokens  list
  sha               string          lengths 40 – 40
  url               string          lengths 87 – 242
244,700
stephanepechard/projy
projy/cmdline.py
run_list
def run_list():
    """ Print the list of all available templates. """
    term = TerminalView()
    term.print_info("These are the available templates:")
    import pkgutil, projy.templates
    pkgpath = os.path.dirname(projy.templates.__file__)
    templates = [name for _, name, _ in pkgutil.iter_modules([pkgpath])]
    for name in templates:
        # the father of all templates, not a real usable one
        if (name != 'ProjyTemplate'):
            term.print_info(term.text_in_color(template_name_from_class_name(name), TERM_PINK))
python
def run_list():
    """ Print the list of all available templates. """
    term = TerminalView()
    term.print_info("These are the available templates:")
    import pkgutil, projy.templates
    pkgpath = os.path.dirname(projy.templates.__file__)
    templates = [name for _, name, _ in pkgutil.iter_modules([pkgpath])]
    for name in templates:
        # the father of all templates, not a real usable one
        if (name != 'ProjyTemplate'):
            term.print_info(term.text_in_color(template_name_from_class_name(name), TERM_PINK))
[ "def", "run_list", "(", ")", ":", "term", "=", "TerminalView", "(", ")", "term", ".", "print_info", "(", "\"These are the available templates:\"", ")", "import", "pkgutil", ",", "projy", ".", "templates", "pkgpath", "=", "os", ".", "path", ".", "dirname", "(", "projy", ".", "templates", ".", "__file__", ")", "templates", "=", "[", "name", "for", "_", ",", "name", ",", "_", "in", "pkgutil", ".", "iter_modules", "(", "[", "pkgpath", "]", ")", "]", "for", "name", "in", "templates", ":", "# the father of all templates, not a real usable one", "if", "(", "name", "!=", "'ProjyTemplate'", ")", ":", "term", ".", "print_info", "(", "term", ".", "text_in_color", "(", "template_name_from_class_name", "(", "name", ")", ",", "TERM_PINK", ")", ")" ]
Print the list of all available templates.
[ "Print", "the", "list", "of", "all", "available", "templates", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/cmdline.py#L41-L51
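Editor's note: run_list above discovers template modules with pkgutil.iter_modules. A minimal, self-contained sketch of that pattern, using the standard-library json package purely for illustration (projy's own templates package is scanned the same way):

import os
import pkgutil
import json

# iter_modules yields (finder, name, ispkg) for each module found in the path
pkgpath = os.path.dirname(json.__file__)
names = [name for _, name, _ in pkgutil.iter_modules([pkgpath])]
print(names)  # e.g. ['decoder', 'encoder', 'scanner', 'tool']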
244,701
stephanepechard/projy
projy/cmdline.py
run_info
def run_info(template):
    """ Print information about a specific template. """
    template.project_name = 'TowelStuff'  # fake project name, always the same
    name = template_name_from_class_name(template.__class__.__name__)
    term = TerminalView()
    term.print_info("Content of template {} with an example project " \
                    "named 'TowelStuff':".format(term.text_in_color(name, TERM_GREEN)))
    dir_name = None
    for file_info in sorted(template.files(), key=lambda dir: dir[0]):
        directory = file_name = template_name = ''
        if file_info[0]:
            directory = file_info[0]
        if file_info[1]:
            file_name = file_info[1]
        if file_info[2]:
            template_name = '\t\t - ' + file_info[2]
        if (directory != dir_name):
            term.print_info('\n\t' + term.text_in_color(directory + '/', TERM_PINK))
            dir_name = directory
        term.print_info('\t\t' + term.text_in_color(file_name, TERM_YELLOW) + template_name)
    # print substitutions
    try:
        subs = template.substitutes().keys()
        if len(subs) > 0:
            subs.sort()
            term.print_info("\nSubstitutions of this template are: ")
            max_len = 0
            for key in subs:
                if max_len < len(key):
                    max_len = len(key)
            for key in subs:
                term.print_info(u"\t{0:{1}} -> {2}".
                                format(key, max_len, template.substitutes()[key]))
    except AttributeError:
        pass
python
def run_info(template):
    """ Print information about a specific template. """
    template.project_name = 'TowelStuff'  # fake project name, always the same
    name = template_name_from_class_name(template.__class__.__name__)
    term = TerminalView()
    term.print_info("Content of template {} with an example project " \
                    "named 'TowelStuff':".format(term.text_in_color(name, TERM_GREEN)))
    dir_name = None
    for file_info in sorted(template.files(), key=lambda dir: dir[0]):
        directory = file_name = template_name = ''
        if file_info[0]:
            directory = file_info[0]
        if file_info[1]:
            file_name = file_info[1]
        if file_info[2]:
            template_name = '\t\t - ' + file_info[2]
        if (directory != dir_name):
            term.print_info('\n\t' + term.text_in_color(directory + '/', TERM_PINK))
            dir_name = directory
        term.print_info('\t\t' + term.text_in_color(file_name, TERM_YELLOW) + template_name)
    # print substitutions
    try:
        subs = template.substitutes().keys()
        if len(subs) > 0:
            subs.sort()
            term.print_info("\nSubstitutions of this template are: ")
            max_len = 0
            for key in subs:
                if max_len < len(key):
                    max_len = len(key)
            for key in subs:
                term.print_info(u"\t{0:{1}} -> {2}".
                                format(key, max_len, template.substitutes()[key]))
    except AttributeError:
        pass
[ "def", "run_info", "(", "template", ")", ":", "template", ".", "project_name", "=", "'TowelStuff'", "# fake project name, always the same", "name", "=", "template_name_from_class_name", "(", "template", ".", "__class__", ".", "__name__", ")", "term", "=", "TerminalView", "(", ")", "term", ".", "print_info", "(", "\"Content of template {} with an example project \"", "\"named 'TowelStuff':\"", ".", "format", "(", "term", ".", "text_in_color", "(", "name", ",", "TERM_GREEN", ")", ")", ")", "dir_name", "=", "None", "for", "file_info", "in", "sorted", "(", "template", ".", "files", "(", ")", ",", "key", "=", "lambda", "dir", ":", "dir", "[", "0", "]", ")", ":", "directory", "=", "file_name", "=", "template_name", "=", "''", "if", "file_info", "[", "0", "]", ":", "directory", "=", "file_info", "[", "0", "]", "if", "file_info", "[", "1", "]", ":", "file_name", "=", "file_info", "[", "1", "]", "if", "file_info", "[", "2", "]", ":", "template_name", "=", "'\\t\\t - '", "+", "file_info", "[", "2", "]", "if", "(", "directory", "!=", "dir_name", ")", ":", "term", ".", "print_info", "(", "'\\n\\t'", "+", "term", ".", "text_in_color", "(", "directory", "+", "'/'", ",", "TERM_PINK", ")", ")", "dir_name", "=", "directory", "term", ".", "print_info", "(", "'\\t\\t'", "+", "term", ".", "text_in_color", "(", "file_name", ",", "TERM_YELLOW", ")", "+", "template_name", ")", "# print substitutions", "try", ":", "subs", "=", "template", ".", "substitutes", "(", ")", ".", "keys", "(", ")", "if", "len", "(", "subs", ")", ">", "0", ":", "subs", ".", "sort", "(", ")", "term", ".", "print_info", "(", "\"\\nSubstitutions of this template are: \"", ")", "max_len", "=", "0", "for", "key", "in", "subs", ":", "if", "max_len", "<", "len", "(", "key", ")", ":", "max_len", "=", "len", "(", "key", ")", "for", "key", "in", "subs", ":", "term", ".", "print_info", "(", "u\"\\t{0:{1}} -> {2}\"", ".", "format", "(", "key", ",", "max_len", ",", "template", ".", "substitutes", "(", ")", "[", "key", "]", ")", ")", "except", "AttributeError", ":", "pass" ]
Print information about a specific template.
[ "Print", "information", "about", "a", "specific", "template", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/cmdline.py#L54-L91
244,702
stephanepechard/projy
projy/cmdline.py
template_class_from_name
def template_class_from_name(name):
    """ Return the template class object from a given name. """
    # import the right template module
    term = TerminalView()
    template_name = name + 'Template'
    try:
        __import__('projy.templates.' + template_name)
        template_mod = sys.modules['projy.templates.' + template_name]
    except ImportError:
        term.print_error_and_exit("Unable to find {}".format(name))
    # import the class from the module
    try:
        template_class = getattr(template_mod, template_name)
    except AttributeError:
        term.print_error_and_exit("Unable to create a template {}".format(name))
    return template_class()
python
def template_class_from_name(name):
    """ Return the template class object from a given name. """
    # import the right template module
    term = TerminalView()
    template_name = name + 'Template'
    try:
        __import__('projy.templates.' + template_name)
        template_mod = sys.modules['projy.templates.' + template_name]
    except ImportError:
        term.print_error_and_exit("Unable to find {}".format(name))
    # import the class from the module
    try:
        template_class = getattr(template_mod, template_name)
    except AttributeError:
        term.print_error_and_exit("Unable to create a template {}".format(name))
    return template_class()
[ "def", "template_class_from_name", "(", "name", ")", ":", "# import the right template module", "term", "=", "TerminalView", "(", ")", "template_name", "=", "name", "+", "'Template'", "try", ":", "__import__", "(", "'projy.templates.'", "+", "template_name", ")", "template_mod", "=", "sys", ".", "modules", "[", "'projy.templates.'", "+", "template_name", "]", "except", "ImportError", ":", "term", ".", "print_error_and_exit", "(", "\"Unable to find {}\"", ".", "format", "(", "name", ")", ")", "# import the class from the module", "try", ":", "template_class", "=", "getattr", "(", "template_mod", ",", "template_name", ")", "except", "AttributeError", ":", "term", ".", "print_error_and_exit", "(", "\"Unable to create a template {}\"", ".", "format", "(", "name", ")", ")", "return", "template_class", "(", ")" ]
Return the template class object from a given name.
[ "Return", "the", "template", "class", "object", "from", "agiven", "name", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/cmdline.py#L94-L110
244,703
mbodenhamer/syn
syn/base_utils/iter.py
iteration_length
def iteration_length(N, start=0, step=1):
    '''Return the number of iteration steps over a list of length N,
    starting at index start, proceeding step elements at a time.
    '''
    if N < 0:
        raise ValueError('N cannot be negative')
    if start < 0:
        start += N
        if start < 0:
            raise ValueError('Invalid start value')
    if step < 0:
        step = -step
        new_N = start + 1
        if new_N > N:
            raise ValueError('Invalid parameters')
        N = new_N
        start = 0
    ret = int(math.ceil((N - start) / float(step)))
    return max(0, ret)
python
def iteration_length(N, start=0, step=1):
    '''Return the number of iteration steps over a list of length N,
    starting at index start, proceeding step elements at a time.
    '''
    if N < 0:
        raise ValueError('N cannot be negative')
    if start < 0:
        start += N
        if start < 0:
            raise ValueError('Invalid start value')
    if step < 0:
        step = -step
        new_N = start + 1
        if new_N > N:
            raise ValueError('Invalid parameters')
        N = new_N
        start = 0
    ret = int(math.ceil((N - start) / float(step)))
    return max(0, ret)
[ "def", "iteration_length", "(", "N", ",", "start", "=", "0", ",", "step", "=", "1", ")", ":", "if", "N", "<", "0", ":", "raise", "ValueError", "(", "'N cannot be negative'", ")", "if", "start", "<", "0", ":", "start", "+=", "N", "if", "start", "<", "0", ":", "raise", "ValueError", "(", "'Invalid start value'", ")", "if", "step", "<", "0", ":", "step", "=", "-", "step", "new_N", "=", "start", "+", "1", "if", "new_N", ">", "N", ":", "raise", "ValueError", "(", "'Invalid parameters'", ")", "N", "=", "new_N", "start", "=", "0", "ret", "=", "int", "(", "math", ".", "ceil", "(", "(", "N", "-", "start", ")", "/", "float", "(", "step", ")", ")", ")", "return", "max", "(", "0", ",", "ret", ")" ]
Return the number of iteration steps over a list of length N, starting at index start, proceeding step elements at a time.
[ "Return", "the", "number", "of", "iteration", "steps", "over", "a", "list", "of", "length", "N", "starting", "at", "index", "start", "proceeding", "step", "elements", "at", "a", "time", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base_utils/iter.py#L59-L80
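Editor's note: a few worked cases for iteration_length, following directly from the formula ceil((N - start) / step) and the negative-step rewrite above (assumes math is imported, as the function requires):

import math  # required by iteration_length

assert iteration_length(10) == 10                    # indices 0..9, step 1
assert iteration_length(10, start=2, step=3) == 3    # indices 2, 5, 8
assert iteration_length(10, start=-2) == 2           # start wraps to 8 -> indices 8, 9
assert iteration_length(10, start=4, step=-2) == 3   # backwards from 4: 4, 2, 0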
244,704
noirbizarre/minibench
minibench/runner.py
BenchmarkRunner.run
def run(self, **kwargs):
    '''
    Run all benchmarks.

    Extra kwargs are passed to benchmark constructors.
    '''
    self.report_start()
    for bench in self.benchmarks:
        bench = bench(before=self.report_before_method,
                      after=self.report_after_method,
                      after_each=self.report_progress,
                      debug=self.debug,
                      **kwargs)
        self.report_before_class(bench)
        bench.run()
        self.report_after_class(bench)
        self.runned.append(bench)
    self.report_end()
python
def run(self, **kwargs):
    '''
    Run all benchmarks.

    Extra kwargs are passed to benchmark constructors.
    '''
    self.report_start()
    for bench in self.benchmarks:
        bench = bench(before=self.report_before_method,
                      after=self.report_after_method,
                      after_each=self.report_progress,
                      debug=self.debug,
                      **kwargs)
        self.report_before_class(bench)
        bench.run()
        self.report_after_class(bench)
        self.runned.append(bench)
    self.report_end()
[ "def", "run", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "report_start", "(", ")", "for", "bench", "in", "self", ".", "benchmarks", ":", "bench", "=", "bench", "(", "before", "=", "self", ".", "report_before_method", ",", "after", "=", "self", ".", "report_after_method", ",", "after_each", "=", "self", ".", "report_progress", ",", "debug", "=", "self", ".", "debug", ",", "*", "*", "kwargs", ")", "self", ".", "report_before_class", "(", "bench", ")", "bench", ".", "run", "(", ")", "self", ".", "report_after_class", "(", "bench", ")", "self", ".", "runned", ".", "append", "(", "bench", ")", "self", ".", "report_end", "(", ")" ]
Run all benchmarks. Extra kwargs are passed to benchmark constructors.
[ "Run", "all", "benchmarks", "." ]
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/runner.py#L46-L63
244,705
noirbizarre/minibench
minibench/runner.py
BenchmarkRunner.load_module
def load_module(self, filename):
    '''Load a benchmark module from file'''
    if not isinstance(filename, string_types):
        return filename
    basename = os.path.splitext(os.path.basename(filename))[0]
    basename = basename.replace('.bench', '')
    modulename = 'benchmarks.{0}'.format(basename)
    return load_module(modulename, filename)
python
def load_module(self, filename):
    '''Load a benchmark module from file'''
    if not isinstance(filename, string_types):
        return filename
    basename = os.path.splitext(os.path.basename(filename))[0]
    basename = basename.replace('.bench', '')
    modulename = 'benchmarks.{0}'.format(basename)
    return load_module(modulename, filename)
[ "def", "load_module", "(", "self", ",", "filename", ")", ":", "if", "not", "isinstance", "(", "filename", ",", "string_types", ")", ":", "return", "filename", "basename", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", "[", "0", "]", "basename", "=", "basename", ".", "replace", "(", "'.bench'", ",", "''", ")", "modulename", "=", "'benchmarks.{0}'", ".", "format", "(", "basename", ")", "return", "load_module", "(", "modulename", ",", "filename", ")" ]
Load a benchmark module from file
[ "Load", "a", "benchmark", "module", "from", "file" ]
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/runner.py#L65-L72
244,706
noirbizarre/minibench
minibench/runner.py
BenchmarkRunner.load_from_module
def load_from_module(self, module):
    '''Load all benchmarks from a given module'''
    benchmarks = []
    for name in dir(module):
        obj = getattr(module, name)
        if (inspect.isclass(obj) and issubclass(obj, Benchmark)
                and obj != Benchmark):
            benchmarks.append(obj)
    return benchmarks
python
def load_from_module(self, module):
    '''Load all benchmarks from a given module'''
    benchmarks = []
    for name in dir(module):
        obj = getattr(module, name)
        if (inspect.isclass(obj) and issubclass(obj, Benchmark)
                and obj != Benchmark):
            benchmarks.append(obj)
    return benchmarks
[ "def", "load_from_module", "(", "self", ",", "module", ")", ":", "benchmarks", "=", "[", "]", "for", "name", "in", "dir", "(", "module", ")", ":", "obj", "=", "getattr", "(", "module", ",", "name", ")", "if", "(", "inspect", ".", "isclass", "(", "obj", ")", "and", "issubclass", "(", "obj", ",", "Benchmark", ")", "and", "obj", "!=", "Benchmark", ")", ":", "benchmarks", ".", "append", "(", "obj", ")", "return", "benchmarks" ]
Load all benchmarks from a given module
[ "Load", "all", "benchmarks", "from", "a", "given", "module" ]
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/runner.py#L74-L82
244,707
radjkarl/fancyTools
fancytools/math/boundingBox.py
boundingBox
def boundingBox(booleanArray):
    """
    return indices of the smallest bounding box enclosing
    all non-zero values within an array

    >>> a = np.array([ [0,0,0,0],
    ...                [0,1,0,1],
    ...                [0,0,1,0],
    ...                [1,0,0,0],
    ...                [0,0,0,0] ])
    >>> print ( boundingBox(a) )
    (slice(1, 3, None), slice(0, 3, None))
    """
    w = np.where(booleanArray)
    p = []
    for i in w:
        if len(i):
            p.append(slice(i.min(), i.max()))
        else:
            p.append(slice(0, 0))  # return None
    return tuple(p)
python
def boundingBox(booleanArray):
    """
    return indices of the smallest bounding box enclosing
    all non-zero values within an array

    >>> a = np.array([ [0,0,0,0],
    ...                [0,1,0,1],
    ...                [0,0,1,0],
    ...                [1,0,0,0],
    ...                [0,0,0,0] ])
    >>> print ( boundingBox(a) )
    (slice(1, 3, None), slice(0, 3, None))
    """
    w = np.where(booleanArray)
    p = []
    for i in w:
        if len(i):
            p.append(slice(i.min(), i.max()))
        else:
            p.append(slice(0, 0))  # return None
    return tuple(p)
[ "def", "boundingBox", "(", "booleanArray", ")", ":", "w", "=", "np", ".", "where", "(", "booleanArray", ")", "p", "=", "[", "]", "for", "i", "in", "w", ":", "if", "len", "(", "i", ")", ":", "p", ".", "append", "(", "slice", "(", "i", ".", "min", "(", ")", ",", "i", ".", "max", "(", ")", ")", ")", "else", ":", "p", ".", "append", "(", "slice", "(", "0", ",", "0", ")", ")", "# return None", "return", "tuple", "(", "p", ")" ]
return indices of the smallest bounding box enclosing
all non-zero values within an array

>>> a = np.array([ [0,0,0,0],
...                [0,1,0,1],
...                [0,0,1,0],
...                [1,0,0,0],
...                [0,0,0,0] ])
>>> print ( boundingBox(a) )
(slice(1, 3, None), slice(0, 3, None))
[ "return", "indices", "of", "the", "smallest", "bounding", "box", "enclosing", "all", "non", "-", "zero", "values", "within", "an", "array" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/boundingBox.py#L5-L27
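Editor's note: a short usage sketch reproducing the function's own doctest (assumes numpy imported as np and boundingBox defined as above). Note that the slices use i.max() itself as the exclusive stop, so slicing an array with the result leaves out the last non-zero row/column; the doctest documents this behaviour as-is.

import numpy as np

a = np.array([[0, 0, 0, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0],
              [1, 0, 0, 0],
              [0, 0, 0, 0]])
print(boundingBox(a))  # (slice(1, 3, None), slice(0, 3, None))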
244,708
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/containers.py
FromMessageGetSimpleElementDeclaration
def FromMessageGetSimpleElementDeclaration(message):
    '''If message consists of one part with an element attribute,
    and this element is a simpleType return a string representing
    the python type, else return None.
    '''
    assert isinstance(message, WSDLTools.Message), 'expecting WSDLTools.Message'

    if len(message.parts) == 1 and message.parts[0].element is not None:
        part = message.parts[0]
        nsuri, name = part.element
        wsdl = message.getWSDL()
        types = wsdl.types
        if types.has_key(nsuri) and types[nsuri].elements.has_key(name):
            e = types[nsuri].elements[name]
            if isinstance(e, XMLSchema.ElementDeclaration) is True and e.getAttribute('type'):
                typ = e.getAttribute('type')
                bt = BaseTypeInterpreter()
                ptype = bt.get_pythontype(typ[1], typ[0])
                return ptype

    return None
python
def FromMessageGetSimpleElementDeclaration(message):
    '''If message consists of one part with an element attribute,
    and this element is a simpleType return a string representing
    the python type, else return None.
    '''
    assert isinstance(message, WSDLTools.Message), 'expecting WSDLTools.Message'

    if len(message.parts) == 1 and message.parts[0].element is not None:
        part = message.parts[0]
        nsuri, name = part.element
        wsdl = message.getWSDL()
        types = wsdl.types
        if types.has_key(nsuri) and types[nsuri].elements.has_key(name):
            e = types[nsuri].elements[name]
            if isinstance(e, XMLSchema.ElementDeclaration) is True and e.getAttribute('type'):
                typ = e.getAttribute('type')
                bt = BaseTypeInterpreter()
                ptype = bt.get_pythontype(typ[1], typ[0])
                return ptype

    return None
[ "def", "FromMessageGetSimpleElementDeclaration", "(", "message", ")", ":", "assert", "isinstance", "(", "message", ",", "WSDLTools", ".", "Message", ")", ",", "'expecting WSDLTools.Message'", "if", "len", "(", "message", ".", "parts", ")", "==", "1", "and", "message", ".", "parts", "[", "0", "]", ".", "element", "is", "not", "None", ":", "part", "=", "message", ".", "parts", "[", "0", "]", "nsuri", ",", "name", "=", "part", ".", "element", "wsdl", "=", "message", ".", "getWSDL", "(", ")", "types", "=", "wsdl", ".", "types", "if", "types", ".", "has_key", "(", "nsuri", ")", "and", "types", "[", "nsuri", "]", ".", "elements", ".", "has_key", "(", "name", ")", ":", "e", "=", "types", "[", "nsuri", "]", ".", "elements", "[", "name", "]", "if", "isinstance", "(", "e", ",", "XMLSchema", ".", "ElementDeclaration", ")", "is", "True", "and", "e", ".", "getAttribute", "(", "'type'", ")", ":", "typ", "=", "e", ".", "getAttribute", "(", "'type'", ")", "bt", "=", "BaseTypeInterpreter", "(", ")", "ptype", "=", "bt", ".", "get_pythontype", "(", "typ", "[", "1", "]", ",", "typ", "[", "0", "]", ")", "return", "ptype", "return", "None" ]
If message consists of one part with an element attribute, and this element is a simpleType return a string representing the python type, else return None.
[ "If", "message", "consists", "of", "one", "part", "with", "an", "element", "attribute", "and", "this", "element", "is", "a", "simpleType", "return", "a", "string", "representing", "the", "python", "type", "else", "return", "None", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L86-L107
244,709
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/containers.py
ContainerBase.getAttributeName
def getAttributeName(self, name):
    '''represents the aname
    '''
    if self.func_aname is None:
        return name
    assert callable(self.func_aname), \
        'expecting callable method for attribute func_aname, not %s' % type(self.func_aname)
    f = self.func_aname
    return f(name)
python
def getAttributeName(self, name):
    '''represents the aname
    '''
    if self.func_aname is None:
        return name
    assert callable(self.func_aname), \
        'expecting callable method for attribute func_aname, not %s' % type(self.func_aname)
    f = self.func_aname
    return f(name)
[ "def", "getAttributeName", "(", "self", ",", "name", ")", ":", "if", "self", ".", "func_aname", "is", "None", ":", "return", "name", "assert", "callable", "(", "self", ".", "func_aname", ")", ",", "'expecting callable method for attribute func_aname, not %s'", "%", "type", "(", "self", ".", "func_aname", ")", "f", "=", "self", ".", "func_aname", "return", "f", "(", "name", ")" ]
represents the aname
[ "represents", "the", "aname" ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L288-L296
244,710
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/containers.py
TypecodeContainerBase.getPyClass
def getPyClass(self):
    '''Name of generated inner class that will be specified as pyclass.
    '''
    # --> EXTENDED
    if self.hasExtPyClass():
        classInfo = self.extPyClasses[self.name]
        return ".".join(classInfo)
    # <--
    return 'Holder'
python
def getPyClass(self):
    '''Name of generated inner class that will be specified as pyclass.
    '''
    # --> EXTENDED
    if self.hasExtPyClass():
        classInfo = self.extPyClasses[self.name]
        return ".".join(classInfo)
    # <--
    return 'Holder'
[ "def", "getPyClass", "(", "self", ")", ":", "# --> EXTENDED", "if", "self", ".", "hasExtPyClass", "(", ")", ":", "classInfo", "=", "self", ".", "extPyClasses", "[", "self", ".", "name", "]", "return", "\".\"", ".", "join", "(", "classInfo", ")", "# <-- ", "return", "'Holder'" ]
Name of generated inner class that will be specified as pyclass.
[ "Name", "of", "generated", "inner", "class", "that", "will", "be", "specified", "as", "pyclass", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L1293-L1302
244,711
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/containers.py
TypecodeContainerBase.getPyClassDefinition
def getPyClassDefinition(self):
    '''Return a list containing pyclass definition.
    '''
    kw = KW.copy()

    # --> EXTENDED
    if self.hasExtPyClass():
        classInfo = self.extPyClasses[self.name]
        kw['classInfo'] = classInfo[0]
        return ["%(ID3)simport %(classInfo)s" % kw]
    # <--

    kw['pyclass'] = self.getPyClass()
    definition = []
    definition.append('%(ID3)sclass %(pyclass)s:' % kw)
    if self.metaclass is not None:
        kw['type'] = self.metaclass
        definition.append('%(ID4)s__metaclass__ = %(type)s' % kw)
    definition.append('%(ID4)stypecode = self' % kw)

    #TODO: Remove pyclass holder __init__ -->
    definition.append('%(ID4)sdef __init__(self):' % kw)
    definition.append('%(ID5)s# pyclass' % kw)

    # JRB HACK need to call _setElements via getElements
    self._setUpElements()

    # JRB HACK need to indent additional one level
    for el in self.elementAttrs:
        kw['element'] = el
        definition.append('%(ID2)s%(element)s' % kw)
    definition.append('%(ID5)sreturn' % kw)
    # <--

    # pyclass descriptive name
    if self.name is not None:
        kw['name'] = self.name
        definition.append(
            '%(ID3)s%(pyclass)s.__name__ = "%(name)s_Holder"' % kw)

    return definition
python
def getPyClassDefinition(self):
    '''Return a list containing pyclass definition.
    '''
    kw = KW.copy()

    # --> EXTENDED
    if self.hasExtPyClass():
        classInfo = self.extPyClasses[self.name]
        kw['classInfo'] = classInfo[0]
        return ["%(ID3)simport %(classInfo)s" % kw]
    # <--

    kw['pyclass'] = self.getPyClass()
    definition = []
    definition.append('%(ID3)sclass %(pyclass)s:' % kw)
    if self.metaclass is not None:
        kw['type'] = self.metaclass
        definition.append('%(ID4)s__metaclass__ = %(type)s' % kw)
    definition.append('%(ID4)stypecode = self' % kw)

    #TODO: Remove pyclass holder __init__ -->
    definition.append('%(ID4)sdef __init__(self):' % kw)
    definition.append('%(ID5)s# pyclass' % kw)

    # JRB HACK need to call _setElements via getElements
    self._setUpElements()

    # JRB HACK need to indent additional one level
    for el in self.elementAttrs:
        kw['element'] = el
        definition.append('%(ID2)s%(element)s' % kw)
    definition.append('%(ID5)sreturn' % kw)
    # <--

    # pyclass descriptive name
    if self.name is not None:
        kw['name'] = self.name
        definition.append(
            '%(ID3)s%(pyclass)s.__name__ = "%(name)s_Holder"' % kw)

    return definition
[ "def", "getPyClassDefinition", "(", "self", ")", ":", "kw", "=", "KW", ".", "copy", "(", ")", "# --> EXTENDED", "if", "self", ".", "hasExtPyClass", "(", ")", ":", "classInfo", "=", "self", ".", "extPyClasses", "[", "self", ".", "name", "]", "kw", "[", "'classInfo'", "]", "=", "classInfo", "[", "0", "]", "return", "[", "\"%(ID3)simport %(classInfo)s\"", "%", "kw", "]", "# <--", "kw", "[", "'pyclass'", "]", "=", "self", ".", "getPyClass", "(", ")", "definition", "=", "[", "]", "definition", ".", "append", "(", "'%(ID3)sclass %(pyclass)s:'", "%", "kw", ")", "if", "self", ".", "metaclass", "is", "not", "None", ":", "kw", "[", "'type'", "]", "=", "self", ".", "metaclass", "definition", ".", "append", "(", "'%(ID4)s__metaclass__ = %(type)s'", "%", "kw", ")", "definition", ".", "append", "(", "'%(ID4)stypecode = self'", "%", "kw", ")", "#TODO: Remove pyclass holder __init__ -->", "definition", ".", "append", "(", "'%(ID4)sdef __init__(self):'", "%", "kw", ")", "definition", ".", "append", "(", "'%(ID5)s# pyclass'", "%", "kw", ")", "# JRB HACK need to call _setElements via getElements", "self", ".", "_setUpElements", "(", ")", "# JRB HACK need to indent additional one level", "for", "el", "in", "self", ".", "elementAttrs", ":", "kw", "[", "'element'", "]", "=", "el", "definition", ".", "append", "(", "'%(ID2)s%(element)s'", "%", "kw", ")", "definition", ".", "append", "(", "'%(ID5)sreturn'", "%", "kw", ")", "# <--", "# pyclass descriptive name", "if", "self", ".", "name", "is", "not", "None", ":", "kw", "[", "'name'", "]", "=", "self", ".", "name", "definition", ".", "append", "(", "'%(ID3)s%(pyclass)s.__name__ = \"%(name)s_Holder\"'", "%", "kw", ")", "return", "definition" ]
Return a list containing pyclass definition.
[ "Return", "a", "list", "containing", "pyclass", "definition", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L1304-L1345
244,712
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/containers.py
TypecodeContainerBase.nsuriLogic
def nsuriLogic(self):
    '''set a variable "ns" that represents the targetNamespace in
    which this item is defined.  Used for namespacing local elements.
    '''
    if self.parentClass:
        return 'ns = %s.%s.schema' % (self.parentClass, self.getClassName())
    return 'ns = %s.%s.schema' % (self.getNSAlias(), self.getClassName())
python
def nsuriLogic(self):
    '''set a variable "ns" that represents the targetNamespace in
    which this item is defined.  Used for namespacing local elements.
    '''
    if self.parentClass:
        return 'ns = %s.%s.schema' % (self.parentClass, self.getClassName())
    return 'ns = %s.%s.schema' % (self.getNSAlias(), self.getClassName())
[ "def", "nsuriLogic", "(", "self", ")", ":", "if", "self", ".", "parentClass", ":", "return", "'ns = %s.%s.schema'", "%", "(", "self", ".", "parentClass", ",", "self", ".", "getClassName", "(", ")", ")", "return", "'ns = %s.%s.schema'", "%", "(", "self", ".", "getNSAlias", "(", ")", ",", "self", ".", "getClassName", "(", ")", ")" ]
set a variable "ns" that represents the targetNamespace in which this item is defined. Used for namespacing local elements.
[ "set", "a", "variable", "ns", "that", "represents", "the", "targetNamespace", "in", "which", "this", "item", "is", "defined", ".", "Used", "for", "namespacing", "local", "elements", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L1347-L1353
244,713
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/containers.py
MessageTypecodeContainer._getOccurs
def _getOccurs(self, e):
    '''return a 3 item tuple
    '''
    minOccurs = maxOccurs = '1'
    nillable = True
    return minOccurs, maxOccurs, nillable
python
def _getOccurs(self, e):
    '''return a 3 item tuple
    '''
    minOccurs = maxOccurs = '1'
    nillable = True
    return minOccurs, maxOccurs, nillable
[ "def", "_getOccurs", "(", "self", ",", "e", ")", ":", "minOccurs", "=", "maxOccurs", "=", "'1'", "nillable", "=", "True", "return", "minOccurs", ",", "maxOccurs", ",", "nillable" ]
return a 3 item tuple
[ "return", "a", "3", "item", "tuple" ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L1752-L1757
244,714
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/containers.py
MessageTypecodeContainer.getAttributeNames
def getAttributeNames(self):
    '''returns a list of anames representing the parts of the message.
    '''
    return map(lambda e: self.getAttributeName(e.name), self.tcListElements)
python
def getAttributeNames(self):
    '''returns a list of anames representing the parts of the message.
    '''
    return map(lambda e: self.getAttributeName(e.name), self.tcListElements)
[ "def", "getAttributeNames", "(", "self", ")", ":", "return", "map", "(", "lambda", "e", ":", "self", ".", "getAttributeName", "(", "e", ".", "name", ")", ",", "self", ".", "tcListElements", ")" ]
returns a list of anames representing the parts of the message.
[ "returns", "a", "list", "of", "anames", "representing", "the", "parts", "of", "the", "message", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L1798-L1802
244,715
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/containers.py
ElementGlobalDefContainer._setContent
def _setContent(self):
    '''GED defines element name, so also define typecode aname
    '''
    kw = KW.copy()
    try:
        kw.update(dict(klass=self.getClassName(),
                       element='ElementDeclaration',
                       literal=self.literalTag(),
                       substitutionGroup=self._substitutionGroupTag(),
                       schema=self.schemaTag(),
                       init=self.simpleConstructor(),
                       ns=self.ns,
                       name=self.name,
                       aname=self.getAttributeName(self.name),
                       baseslogic=self.getBasesLogic(ID3),
                       #ofwhat=self.getTypecodeList(),
                       #atypecode=self.attribute_typecode,
                       #pyclass=self.getPyClass(),
                       alias=NAD.getAlias(self.sKlassNS),
                       subclass=type_class_name(self.sKlass),
                       ))
    except Exception, ex:
        args = ['Failure processing an element w/local complexType: %s' % (
            self._item.getItemTrace())]
        args += ex.args
        ex.args = tuple(args)
        raise

    if self.local:
        kw['element'] = 'LocalElementDeclaration'

    element = [
        '%(ID1)sclass %(klass)s(%(element)s):',
        '%(ID2)s%(literal)s',
        '%(ID2)s%(schema)s',
        '%(ID2)s%(substitutionGroup)s',
        '%(ID2)s%(init)s',
        '%(ID3)skw["pname"] = ("%(ns)s","%(name)s")',
        '%(ID3)skw["aname"] = "%(aname)s"',
        '%(baseslogic)s',
        '%(ID3)s%(alias)s.%(subclass)s.__init__(self, **kw)',
        '%(ID3)sif self.pyclass is not None: self.pyclass.__name__ = "%(klass)s_Holder"',
    ]
    self.writeArray(map(lambda l: l % kw, element))
python
def _setContent(self):
    '''GED defines element name, so also define typecode aname
    '''
    kw = KW.copy()
    try:
        kw.update(dict(klass=self.getClassName(),
                       element='ElementDeclaration',
                       literal=self.literalTag(),
                       substitutionGroup=self._substitutionGroupTag(),
                       schema=self.schemaTag(),
                       init=self.simpleConstructor(),
                       ns=self.ns,
                       name=self.name,
                       aname=self.getAttributeName(self.name),
                       baseslogic=self.getBasesLogic(ID3),
                       #ofwhat=self.getTypecodeList(),
                       #atypecode=self.attribute_typecode,
                       #pyclass=self.getPyClass(),
                       alias=NAD.getAlias(self.sKlassNS),
                       subclass=type_class_name(self.sKlass),
                       ))
    except Exception, ex:
        args = ['Failure processing an element w/local complexType: %s' % (
            self._item.getItemTrace())]
        args += ex.args
        ex.args = tuple(args)
        raise

    if self.local:
        kw['element'] = 'LocalElementDeclaration'

    element = [
        '%(ID1)sclass %(klass)s(%(element)s):',
        '%(ID2)s%(literal)s',
        '%(ID2)s%(schema)s',
        '%(ID2)s%(substitutionGroup)s',
        '%(ID2)s%(init)s',
        '%(ID3)skw["pname"] = ("%(ns)s","%(name)s")',
        '%(ID3)skw["aname"] = "%(aname)s"',
        '%(baseslogic)s',
        '%(ID3)s%(alias)s.%(subclass)s.__init__(self, **kw)',
        '%(ID3)sif self.pyclass is not None: self.pyclass.__name__ = "%(klass)s_Holder"',
    ]
    self.writeArray(map(lambda l: l % kw, element))
[ "def", "_setContent", "(", "self", ")", ":", "kw", "=", "KW", ".", "copy", "(", ")", "try", ":", "kw", ".", "update", "(", "dict", "(", "klass", "=", "self", ".", "getClassName", "(", ")", ",", "element", "=", "'ElementDeclaration'", ",", "literal", "=", "self", ".", "literalTag", "(", ")", ",", "substitutionGroup", "=", "self", ".", "_substitutionGroupTag", "(", ")", ",", "schema", "=", "self", ".", "schemaTag", "(", ")", ",", "init", "=", "self", ".", "simpleConstructor", "(", ")", ",", "ns", "=", "self", ".", "ns", ",", "name", "=", "self", ".", "name", ",", "aname", "=", "self", ".", "getAttributeName", "(", "self", ".", "name", ")", ",", "baseslogic", "=", "self", ".", "getBasesLogic", "(", "ID3", ")", ",", "#ofwhat=self.getTypecodeList(),", "#atypecode=self.attribute_typecode,", "#pyclass=self.getPyClass(),", "alias", "=", "NAD", ".", "getAlias", "(", "self", ".", "sKlassNS", ")", ",", "subclass", "=", "type_class_name", "(", "self", ".", "sKlass", ")", ",", ")", ")", "except", "Exception", ",", "ex", ":", "args", "=", "[", "'Failure processing an element w/local complexType: %s'", "%", "(", "self", ".", "_item", ".", "getItemTrace", "(", ")", ")", "]", "args", "+=", "ex", ".", "args", "ex", ".", "args", "=", "tuple", "(", "args", ")", "raise", "if", "self", ".", "local", ":", "kw", "[", "'element'", "]", "=", "'LocalElementDeclaration'", "element", "=", "[", "'%(ID1)sclass %(klass)s(%(element)s):'", ",", "'%(ID2)s%(literal)s'", ",", "'%(ID2)s%(schema)s'", ",", "'%(ID2)s%(substitutionGroup)s'", ",", "'%(ID2)s%(init)s'", ",", "'%(ID3)skw[\"pname\"] = (\"%(ns)s\",\"%(name)s\")'", ",", "'%(ID3)skw[\"aname\"] = \"%(aname)s\"'", ",", "'%(baseslogic)s'", ",", "'%(ID3)s%(alias)s.%(subclass)s.__init__(self, **kw)'", ",", "'%(ID3)sif self.pyclass is not None: self.pyclass.__name__ = \"%(klass)s_Holder\"'", ",", "]", "self", ".", "writeArray", "(", "map", "(", "lambda", "l", ":", "l", "%", "kw", ",", "element", ")", ")" ]
GED defines element name, so also define typecode aname
[ "GED", "defines", "element", "name", "so", "also", "define", "typecode", "aname" ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L2294-L2337
244,716
ronaldguillen/wave
wave/utils/model_meta.py
_resolve_model
def _resolve_model(obj):
    """
    Resolve supplied `obj` to a Django model class.

    `obj` must be a Django model class itself, or a string
    representation of one.  Useful in situations like GH #1225 where
    Django may not have resolved a string-based reference to a model in
    another model's foreign key definition.

    String representations should have the format:
        'appname.ModelName'
    """
    if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
        app_name, model_name = obj.split('.')
        resolved_model = apps.get_model(app_name, model_name)
        if resolved_model is None:
            msg = "Django did not return a model for {0}.{1}"
            raise ImproperlyConfigured(msg.format(app_name, model_name))
        return resolved_model
    elif inspect.isclass(obj) and issubclass(obj, models.Model):
        return obj
    raise ValueError("{0} is not a Django model".format(obj))
python
def _resolve_model(obj):
    """
    Resolve supplied `obj` to a Django model class.

    `obj` must be a Django model class itself, or a string
    representation of one.  Useful in situations like GH #1225 where
    Django may not have resolved a string-based reference to a model in
    another model's foreign key definition.

    String representations should have the format:
        'appname.ModelName'
    """
    if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
        app_name, model_name = obj.split('.')
        resolved_model = apps.get_model(app_name, model_name)
        if resolved_model is None:
            msg = "Django did not return a model for {0}.{1}"
            raise ImproperlyConfigured(msg.format(app_name, model_name))
        return resolved_model
    elif inspect.isclass(obj) and issubclass(obj, models.Model):
        return obj
    raise ValueError("{0} is not a Django model".format(obj))
[ "def", "_resolve_model", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", "and", "len", "(", "obj", ".", "split", "(", "'.'", ")", ")", "==", "2", ":", "app_name", ",", "model_name", "=", "obj", ".", "split", "(", "'.'", ")", "resolved_model", "=", "apps", ".", "get_model", "(", "app_name", ",", "model_name", ")", "if", "resolved_model", "is", "None", ":", "msg", "=", "\"Django did not return a model for {0}.{1}\"", "raise", "ImproperlyConfigured", "(", "msg", ".", "format", "(", "app_name", ",", "model_name", ")", ")", "return", "resolved_model", "elif", "inspect", ".", "isclass", "(", "obj", ")", "and", "issubclass", "(", "obj", ",", "models", ".", "Model", ")", ":", "return", "obj", "raise", "ValueError", "(", "\"{0} is not a Django model\"", ".", "format", "(", "obj", ")", ")" ]
Resolve supplied `obj` to a Django model class.

`obj` must be a Django model class itself, or a string
representation of one.  Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.

String representations should have the format:
    'appname.ModelName'
[ "Resolve", "supplied", "obj", "to", "a", "Django", "model", "class", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/utils/model_meta.py#L38-L59
244,717
ronaldguillen/wave
wave/utils/model_meta.py
is_abstract_model
def is_abstract_model(model):
    """
    Given a model class, returns a boolean True if it is abstract
    and False if it is not.
    """
    return hasattr(model, '_meta') and hasattr(model._meta, 'abstract') and model._meta.abstract
python
def is_abstract_model(model):
    """
    Given a model class, returns a boolean True if it is abstract
    and False if it is not.
    """
    return hasattr(model, '_meta') and hasattr(model._meta, 'abstract') and model._meta.abstract
[ "def", "is_abstract_model", "(", "model", ")", ":", "return", "hasattr", "(", "model", ",", "'_meta'", ")", "and", "hasattr", "(", "model", ".", "_meta", ",", "'abstract'", ")", "and", "model", ".", "_meta", ".", "abstract" ]
Given a model class, returns a boolean True if it is abstract and False if it is not.
[ "Given", "a", "model", "class", "returns", "a", "boolean", "True", "if", "it", "is", "abstract", "and", "False", "if", "it", "is", "not", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/utils/model_meta.py#L187-L191
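Editor's note: because is_abstract_model is purely duck-typed (two hasattr checks plus an attribute read), it can be exercised without Django at all; a sketch with hypothetical stand-in classes:

class Meta:                  # hypothetical stand-in for Django's Options object
    abstract = True

class AbstractDummy:         # stands in for an abstract model class
    _meta = Meta()

class PlainDummy:            # no _meta attribute at all
    pass

print(is_abstract_model(AbstractDummy))  # True
print(is_abstract_model(PlainDummy))     # False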
244,718
praekeltfoundation/seed-control-interface-service
services/tasks.py
QueuePollService.run
def run(self):
    """
    Queues all services to be polled. Should be run via beat.
    """
    services = Service.objects.all()
    for service in services:
        poll_service.apply_async(kwargs={"service_id": str(service.id)})
    return "Queued <%s> Service(s) for Polling" % services.count()
python
def run(self):
    """
    Queues all services to be polled. Should be run via beat.
    """
    services = Service.objects.all()
    for service in services:
        poll_service.apply_async(kwargs={"service_id": str(service.id)})
    return "Queued <%s> Service(s) for Polling" % services.count()
[ "def", "run", "(", "self", ")", ":", "services", "=", "Service", ".", "objects", ".", "all", "(", ")", "for", "service", "in", "services", ":", "poll_service", ".", "apply_async", "(", "kwargs", "=", "{", "\"service_id\"", ":", "str", "(", "service", ".", "id", ")", "}", ")", "return", "\"Queued <%s> Service(s) for Polling\"", "%", "services", ".", "count", "(", ")" ]
Queues all services to be polled. Should be run via beat.
[ "Queues", "all", "services", "to", "be", "polled", ".", "Should", "be", "run", "via", "beat", "." ]
0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e
https://github.com/praekeltfoundation/seed-control-interface-service/blob/0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e/services/tasks.py#L81-L88
244,719
praekeltfoundation/seed-control-interface-service
services/tasks.py
GetUserToken.run
def run(self, service_id, user_id, email, **kwargs):
    """
    Create and Retrieve a token from remote service. Save to DB.
    """
    log = self.get_logger(**kwargs)
    log.info("Loading Service for token creation")
    try:
        service = Service.objects.get(id=service_id)
        log.info("Getting token for <%s> on <%s>" % (email, service.name))
        response = self.create_token(service.url, email, service.token)
        try:
            result = response.json()
            ust, created = UserServiceToken.objects.get_or_create(
                service=service, user_id=user_id, email=email)
            ust.token = result["token"]
            ust.save()
            log.info(
                "Token saved for <%s> on <%s>" % (email, service.name))
        except Exception:
            # can't decode means there was not a valid response
            log.info("Failed to parse response from <%s>" % (service.name))
        return "Completed getting token for <%s>" % (email)
    except ObjectDoesNotExist:
        logger.error('Missing Service', exc_info=True)
    except SoftTimeLimitExceeded:
        logger.error(
            'Soft time limit exceed processing getting service token \
            via Celery.', exc_info=True)
python
def run(self, service_id, user_id, email, **kwargs):
    """
    Create and Retrieve a token from remote service. Save to DB.
    """
    log = self.get_logger(**kwargs)
    log.info("Loading Service for token creation")
    try:
        service = Service.objects.get(id=service_id)
        log.info("Getting token for <%s> on <%s>" % (email, service.name))
        response = self.create_token(service.url, email, service.token)
        try:
            result = response.json()
            ust, created = UserServiceToken.objects.get_or_create(
                service=service, user_id=user_id, email=email)
            ust.token = result["token"]
            ust.save()
            log.info(
                "Token saved for <%s> on <%s>" % (email, service.name))
        except Exception:
            # can't decode means there was not a valid response
            log.info("Failed to parse response from <%s>" % (service.name))
        return "Completed getting token for <%s>" % (email)
    except ObjectDoesNotExist:
        logger.error('Missing Service', exc_info=True)
    except SoftTimeLimitExceeded:
        logger.error(
            'Soft time limit exceed processing getting service token \
            via Celery.', exc_info=True)
[ "def", "run", "(", "self", ",", "service_id", ",", "user_id", ",", "email", ",", "*", "*", "kwargs", ")", ":", "log", "=", "self", ".", "get_logger", "(", "*", "*", "kwargs", ")", "log", ".", "info", "(", "\"Loading Service for token creation\"", ")", "try", ":", "service", "=", "Service", ".", "objects", ".", "get", "(", "id", "=", "service_id", ")", "log", ".", "info", "(", "\"Getting token for <%s> on <%s>\"", "%", "(", "email", ",", "service", ".", "name", ")", ")", "response", "=", "self", ".", "create_token", "(", "service", ".", "url", ",", "email", ",", "service", ".", "token", ")", "try", ":", "result", "=", "response", ".", "json", "(", ")", "ust", ",", "created", "=", "UserServiceToken", ".", "objects", ".", "get_or_create", "(", "service", "=", "service", ",", "user_id", "=", "user_id", ",", "email", "=", "email", ")", "ust", ".", "token", "=", "result", "[", "\"token\"", "]", "ust", ".", "save", "(", ")", "log", ".", "info", "(", "\"Token saved for <%s> on <%s>\"", "%", "(", "email", ",", "service", ".", "name", ")", ")", "except", "Exception", ":", "# can't decode means there was not a valid response", "log", ".", "info", "(", "\"Failed to parse response from <%s>\"", "%", "(", "service", ".", "name", ")", ")", "return", "\"Completed getting token for <%s>\"", "%", "(", "email", ")", "except", "ObjectDoesNotExist", ":", "logger", ".", "error", "(", "'Missing Service'", ",", "exc_info", "=", "True", ")", "except", "SoftTimeLimitExceeded", ":", "logger", ".", "error", "(", "'Soft time limit exceed processing getting service token \\\n via Celery.'", ",", "exc_info", "=", "True", ")" ]
Create and Retrieve a token from remote service. Save to DB.
[ "Create", "and", "Retrieve", "a", "token", "from", "remote", "service", ".", "Save", "to", "DB", "." ]
0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e
https://github.com/praekeltfoundation/seed-control-interface-service/blob/0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e/services/tasks.py#L195-L225
244,720
praekeltfoundation/seed-control-interface-service
services/tasks.py
QueueServiceMetricSync.run
def run(self):
    """
    Queues all services to be polled for metrics. Should be run via beat.
    """
    services = Service.objects.all()
    for service in services:
        service_metric_sync.apply_async(
            kwargs={"service_id": str(service.id)})
        key = "services.downtime.%s.sum" % (
            utils.normalise_string(service.name))
        check = WidgetData.objects.filter(service=None, key=key)
        if not check.exists():
            WidgetData.objects.create(
                key=key,
                title="TEMP - Pending update"
            )
    return "Queued <%s> Service(s) for Metric Sync" % services.count()
python
def run(self):
    """
    Queues all services to be polled for metrics. Should be run via beat.
    """
    services = Service.objects.all()
    for service in services:
        service_metric_sync.apply_async(
            kwargs={"service_id": str(service.id)})
        key = "services.downtime.%s.sum" % (
            utils.normalise_string(service.name))
        check = WidgetData.objects.filter(service=None, key=key)
        if not check.exists():
            WidgetData.objects.create(
                key=key,
                title="TEMP - Pending update"
            )
    return "Queued <%s> Service(s) for Metric Sync" % services.count()
[ "def", "run", "(", "self", ")", ":", "services", "=", "Service", ".", "objects", ".", "all", "(", ")", "for", "service", "in", "services", ":", "service_metric_sync", ".", "apply_async", "(", "kwargs", "=", "{", "\"service_id\"", ":", "str", "(", "service", ".", "id", ")", "}", ")", "key", "=", "\"services.downtime.%s.sum\"", "%", "(", "utils", ".", "normalise_string", "(", "service", ".", "name", ")", ")", "check", "=", "WidgetData", ".", "objects", ".", "filter", "(", "service", "=", "None", ",", "key", "=", "key", ")", "if", "not", "check", ".", "exists", "(", ")", ":", "WidgetData", ".", "objects", ".", "create", "(", "key", "=", "key", ",", "title", "=", "\"TEMP - Pending update\"", ")", "return", "\"Queued <%s> Service(s) for Metric Sync\"", "%", "services", ".", "count", "(", ")" ]
Queues all services to be polled for metrics. Should be run via beat.
[ "Queues", "all", "services", "to", "be", "polled", "for", "metrics", ".", "Should", "be", "run", "via", "beat", "." ]
0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e
https://github.com/praekeltfoundation/seed-control-interface-service/blob/0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e/services/tasks.py#L232-L251
244,721
praekeltfoundation/seed-control-interface-service
services/tasks.py
ServiceMetricSync.run
def run(self, service_id, **kwargs):
    """
    Retrieve a list of metrics. Ensure they are set as metric data sources.
    """
    log = self.get_logger(**kwargs)
    log.info("Loading Service for metric sync")
    try:
        service = Service.objects.get(id=service_id)
        log.info("Getting metrics for <%s>" % (service.name))
        metrics = self.get_metrics(service.url, service.token)
        result = metrics.json()
        if "metrics_available" in result:
            for key in result["metrics_available"]:
                check = WidgetData.objects.filter(service=service, key=key)
                if not check.exists():
                    WidgetData.objects.create(
                        service=service,
                        key=key,
                        title="TEMP - Pending update"
                    )
                    log.info("Add WidgetData for <%s>" % (key,))
        return "Completed metric sync for <%s>" % (service.name)
    except ObjectDoesNotExist:
        logger.error('Missing Service', exc_info=True)
    except SoftTimeLimitExceeded:
        logger.error(
            'Soft time limit exceed processing pull of service metrics \
            via Celery.', exc_info=True)
python
def run(self, service_id, **kwargs):
    """
    Retrieve a list of metrics. Ensure they are set as metric data sources.
    """
    log = self.get_logger(**kwargs)
    log.info("Loading Service for metric sync")
    try:
        service = Service.objects.get(id=service_id)
        log.info("Getting metrics for <%s>" % (service.name))
        metrics = self.get_metrics(service.url, service.token)
        result = metrics.json()
        if "metrics_available" in result:
            for key in result["metrics_available"]:
                check = WidgetData.objects.filter(service=service, key=key)
                if not check.exists():
                    WidgetData.objects.create(
                        service=service,
                        key=key,
                        title="TEMP - Pending update"
                    )
                    log.info("Add WidgetData for <%s>" % (key,))
        return "Completed metric sync for <%s>" % (service.name)
    except ObjectDoesNotExist:
        logger.error('Missing Service', exc_info=True)
    except SoftTimeLimitExceeded:
        logger.error(
            'Soft time limit exceed processing pull of service metrics \
            via Celery.', exc_info=True)
[ "def", "run", "(", "self", ",", "service_id", ",", "*", "*", "kwargs", ")", ":", "log", "=", "self", ".", "get_logger", "(", "*", "*", "kwargs", ")", "log", ".", "info", "(", "\"Loading Service for metric sync\"", ")", "try", ":", "service", "=", "Service", ".", "objects", ".", "get", "(", "id", "=", "service_id", ")", "log", ".", "info", "(", "\"Getting metrics for <%s>\"", "%", "(", "service", ".", "name", ")", ")", "metrics", "=", "self", ".", "get_metrics", "(", "service", ".", "url", ",", "service", ".", "token", ")", "result", "=", "metrics", ".", "json", "(", ")", "if", "\"metrics_available\"", "in", "result", ":", "for", "key", "in", "result", "[", "\"metrics_available\"", "]", ":", "check", "=", "WidgetData", ".", "objects", ".", "filter", "(", "service", "=", "service", ",", "key", "=", "key", ")", "if", "not", "check", ".", "exists", "(", ")", ":", "WidgetData", ".", "objects", ".", "create", "(", "service", "=", "service", ",", "key", "=", "key", ",", "title", "=", "\"TEMP - Pending update\"", ")", "log", ".", "info", "(", "\"Add WidgetData for <%s>\"", "%", "(", "key", ",", ")", ")", "return", "\"Completed metric sync for <%s>\"", "%", "(", "service", ".", "name", ")", "except", "ObjectDoesNotExist", ":", "logger", ".", "error", "(", "'Missing Service'", ",", "exc_info", "=", "True", ")", "except", "SoftTimeLimitExceeded", ":", "logger", ".", "error", "(", "'Soft time limit exceed processing pull of service metrics \\\n via Celery.'", ",", "exc_info", "=", "True", ")" ]
Retrieve a list of metrics. Ensure they are set as metric data sources.
[ "Retrieve", "a", "list", "of", "metrics", ".", "Ensure", "they", "are", "set", "as", "metric", "data", "sources", "." ]
0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e
https://github.com/praekeltfoundation/seed-control-interface-service/blob/0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e/services/tasks.py#L279-L309
244,722
pip-services3-python/pip-services3-components-python
pip_services3_components/count/Timing.py
Timing.end_timing
def end_timing(self):
    """
    Ends timing of an execution block, calculates elapsed time
    and updates the associated counter.
    """
    if self._callback != None:
        elapsed = time.perf_counter() * 1000 - self._start
        self._callback.end_timing(self._counter, elapsed)
python
def end_timing(self):
    """
    Ends timing of an execution block, calculates elapsed time
    and updates the associated counter.
    """
    if self._callback != None:
        elapsed = time.perf_counter() * 1000 - self._start
        self._callback.end_timing(self._counter, elapsed)
[ "def", "end_timing", "(", "self", ")", ":", "if", "self", ".", "_callback", "!=", "None", ":", "elapsed", "=", "time", ".", "perf_counter", "(", ")", "*", "1000", "-", "self", ".", "_start", "self", ".", "_callback", ".", "end_timing", "(", "self", ".", "_counter", ",", "elapsed", ")" ]
Ends timing of an execution block, calculates elapsed time and updates the associated counter.
[ "Ends", "timing", "of", "an", "execution", "block", "calculates", "elapsed", "time", "and", "updates", "the", "associated", "counter", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/count/Timing.py#L42-L49
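Editor's note: a minimal sketch of the elapsed-time arithmetic in end_timing: time.perf_counter() is scaled by 1000 so both the stored start mark and the computed delta are in milliseconds.

import time

start = time.perf_counter() * 1000       # start mark, in ms (as Timing stores it)
time.sleep(0.05)                          # the work being timed
elapsed = time.perf_counter() * 1000 - start
print("elapsed: %.1f ms" % elapsed)       # roughly 50 ms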
244,723
RazerM/bucketcache
bucketcache/buckets.py
Bucket.prune_directory
def prune_directory(self):
    """Delete any objects that can be loaded and are expired according
    to the current lifetime setting.

    A file will be deleted if the following conditions are met:

    - The file extension matches
      :py:meth:`bucketcache.backends.Backend.file_extension`
    - The object can be loaded by the configured backend.
    - The object's expiration date has passed.

    Returns:
        File size and number of files deleted.
        :rtype: :py:class:`~bucketcache.utilities.PrunedFilesInfo`

    .. note::

        For any buckets that share directories, ``prune_directory``
        will affect files saved with both, if they use the same
        backend class.

        This is not destructive, because only files that have expired
        according to the lifetime of the original bucket are deleted.
    """
    glob = '*.{ext}'.format(ext=self.backend.file_extension)

    totalsize = 0
    totalnum = 0
    for f in self._path.glob(glob):
        filesize = f.stat().st_size
        key_hash = f.stem
        in_cache = key_hash in self._cache
        try:
            self._get_obj_from_hash(key_hash)
        except KeyExpirationError:
            # File has been deleted by `_get_obj_from_hash`
            totalsize += filesize
            totalnum += 1
        except KeyInvalidError:
            pass
        except Exception:
            raise
        else:
            if not in_cache:
                del self._cache[key_hash]

    return PrunedFilesInfo(size=totalsize, num=totalnum)
python
def prune_directory(self):
    """Delete any objects that can be loaded and are expired according
    to the current lifetime setting.

    A file will be deleted if the following conditions are met:

    - The file extension matches
      :py:meth:`bucketcache.backends.Backend.file_extension`
    - The object can be loaded by the configured backend.
    - The object's expiration date has passed.

    Returns:
        File size and number of files deleted.
        :rtype: :py:class:`~bucketcache.utilities.PrunedFilesInfo`

    .. note::

        For any buckets that share directories, ``prune_directory``
        will affect files saved with both, if they use the same
        backend class.

        This is not destructive, because only files that have expired
        according to the lifetime of the original bucket are deleted.
    """
    glob = '*.{ext}'.format(ext=self.backend.file_extension)

    totalsize = 0
    totalnum = 0
    for f in self._path.glob(glob):
        filesize = f.stat().st_size
        key_hash = f.stem
        in_cache = key_hash in self._cache
        try:
            self._get_obj_from_hash(key_hash)
        except KeyExpirationError:
            # File has been deleted by `_get_obj_from_hash`
            totalsize += filesize
            totalnum += 1
        except KeyInvalidError:
            pass
        except Exception:
            raise
        else:
            if not in_cache:
                del self._cache[key_hash]

    return PrunedFilesInfo(size=totalsize, num=totalnum)
[ "def", "prune_directory", "(", "self", ")", ":", "glob", "=", "'*.{ext}'", ".", "format", "(", "ext", "=", "self", ".", "backend", ".", "file_extension", ")", "totalsize", "=", "0", "totalnum", "=", "0", "for", "f", "in", "self", ".", "_path", ".", "glob", "(", "glob", ")", ":", "filesize", "=", "f", ".", "stat", "(", ")", ".", "st_size", "key_hash", "=", "f", ".", "stem", "in_cache", "=", "key_hash", "in", "self", ".", "_cache", "try", ":", "self", ".", "_get_obj_from_hash", "(", "key_hash", ")", "except", "KeyExpirationError", ":", "# File has been deleted by `_get_obj_from_hash`", "totalsize", "+=", "filesize", "totalnum", "+=", "1", "except", "KeyInvalidError", ":", "pass", "except", "Exception", ":", "raise", "else", ":", "if", "not", "in_cache", ":", "del", "self", ".", "_cache", "[", "key_hash", "]", "return", "PrunedFilesInfo", "(", "size", "=", "totalsize", ",", "num", "=", "totalnum", ")" ]
Delete any objects that can be loaded and are expired according
to the current lifetime setting.

A file will be deleted if the following conditions are met:

- The file extension matches :py:meth:`bucketcache.backends.Backend.file_extension`
- The object can be loaded by the configured backend.
- The object's expiration date has passed.

Returns:
    File size and number of files deleted.
:rtype: :py:class:`~bucketcache.utilities.PrunedFilesInfo`

.. note::

    For any buckets that share directories, ``prune_directory``
    will affect files saved with both, if they use the same backend class.

    This is not destructive, because only files that have expired
    according to the lifetime of the original bucket are deleted.
[ "Delete", "any", "objects", "that", "can", "be", "loaded", "and", "are", "expired", "according", "to", "the", "current", "lifetime", "setting", "." ]
8d9b163b73da8c498793cce2f22f6a7cbe524d94
https://github.com/RazerM/bucketcache/blob/8d9b163b73da8c498793cce2f22f6a7cbe524d94/bucketcache/buckets.py#L237-L280
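A minimal usage sketch for prune_directory, assuming the dict-style Bucket interface from the bucketcache README; the cache directory and lifetime below are illustrative, not taken from this record:

from bucketcache import Bucket

bucket = Bucket('cache', minutes=1)  # hypothetical path and lifetime
bucket['answer'] = 42                # store something with a one-minute lifetime

# ...once entries have expired, reclaim the disk space they occupy
info = bucket.prune_directory()
print('pruned {} files, freeing {} bytes'.format(info.num, info.size))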
244,724
RazerM/bucketcache
bucketcache/buckets.py
DeferredWriteBucket.sync
def sync(self):
    """Commit deferred writes to file."""
    for key_hash, obj in six.iteritems(self._cache):
        # Objects are checked for expiration in __getitem__,
        # but we can check here to avoid unnecessary writes.
        if not obj.has_expired():
            file_path = self._path_for_hash(key_hash)
            with open(str(file_path), self._write_mode) as f:
                obj.dump(f)
python
def sync(self):
    """Commit deferred writes to file."""
    for key_hash, obj in six.iteritems(self._cache):
        # Objects are checked for expiration in __getitem__,
        # but we can check here to avoid unnecessary writes.
        if not obj.has_expired():
            file_path = self._path_for_hash(key_hash)
            with open(str(file_path), self._write_mode) as f:
                obj.dump(f)
[ "def", "sync", "(", "self", ")", ":", "for", "key_hash", ",", "obj", "in", "six", ".", "iteritems", "(", "self", ".", "_cache", ")", ":", "# Objects are checked for expiration in __getitem__,", "# but we can check here to avoid unnecessary writes.", "if", "not", "obj", ".", "has_expired", "(", ")", ":", "file_path", "=", "self", ".", "_path_for_hash", "(", "key_hash", ")", "with", "open", "(", "str", "(", "file_path", ")", ",", "self", ".", "_write_mode", ")", "as", "f", ":", "obj", ".", "dump", "(", "f", ")" ]
Commit deferred writes to file.
[ "Commit", "deferred", "writes", "to", "file", "." ]
8d9b163b73da8c498793cce2f22f6a7cbe524d94
https://github.com/RazerM/bucketcache/blob/8d9b163b73da8c498793cce2f22f6a7cbe524d94/bucketcache/buckets.py#L471-L479
244,725
akolpakov/paynova-api-python-client
paynova_api_python_client/paynova.py
Paynova.get_url
def get_url(self, resource, params=None):
    """
    Generate url for request
    """

    # replace placeholders
    pattern = r'\{(.+?)\}'
    resource = re.sub(pattern, lambda t: str(params.get(t.group(1), '')), resource)

    # build url
    parts = (self.endpoint, '/api/', resource)
    return '/'.join(map(lambda x: str(x).strip('/'), parts))
python
def get_url(self, resource, params=None):
    """
    Generate url for request
    """

    # replace placeholders
    pattern = r'\{(.+?)\}'
    resource = re.sub(pattern, lambda t: str(params.get(t.group(1), '')), resource)

    # build url
    parts = (self.endpoint, '/api/', resource)
    return '/'.join(map(lambda x: str(x).strip('/'), parts))
[ "def", "get_url", "(", "self", ",", "resource", ",", "params", "=", "None", ")", ":", "# replace placeholders", "pattern", "=", "r'\\{(.+?)\\}'", "resource", "=", "re", ".", "sub", "(", "pattern", ",", "lambda", "t", ":", "str", "(", "params", ".", "get", "(", "t", ".", "group", "(", "1", ")", ",", "''", ")", ")", ",", "resource", ")", "# build url", "parts", "=", "(", "self", ".", "endpoint", ",", "'/api/'", ",", "resource", ")", "return", "'/'", ".", "join", "(", "map", "(", "lambda", "x", ":", "str", "(", "x", ")", ".", "strip", "(", "'/'", ")", ",", "parts", ")", ")" ]
Generate url for request
[ "Generate", "url", "for", "request" ]
930277623fc7b142ae9365a44f15a3a7b79bd974
https://github.com/akolpakov/paynova-api-python-client/blob/930277623fc7b142ae9365a44f15a3a7b79bd974/paynova_api_python_client/paynova.py#L37-L51
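The placeholder substitution in get_url can be traced in isolation; the endpoint, resource and params values here are hypothetical:

import re

endpoint = 'https://testapi.paynova.com'
resource = 'orders/{orderId}/refund'
params = {'orderId': '12345'}

resource = re.sub(r'\{(.+?)\}', lambda t: str(params.get(t.group(1), '')), resource)
url = '/'.join(str(x).strip('/') for x in (endpoint, '/api/', resource))
print(url)  # https://testapi.paynova.com/api/orders/12345/refund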
244,726
akolpakov/paynova-api-python-client
paynova_api_python_client/paynova.py
Paynova.request
def request(self, method, resource, params=None):
    """
    Make request to the server and parse response
    """

    url = self.get_url(resource, params)

    # headers
    headers = {
        'Content-Type': 'application/json'
    }
    auth = requests.auth.HTTPBasicAuth(self.username, self.password)

    # request
    log.info('Request to %s. Data: %s' % (url, params))
    response = requests.request(method, url, data=json.dumps(params), headers=headers, auth=auth)
    response.raise_for_status()

    # response
    log.info('Response from %s: %s' % (url, response.text))
    content = response.json()
    self.parse_status(content.get('status'))
    return content
python
def request(self, method, resource, params=None):
    """
    Make request to the server and parse response
    """

    url = self.get_url(resource, params)

    # headers
    headers = {
        'Content-Type': 'application/json'
    }
    auth = requests.auth.HTTPBasicAuth(self.username, self.password)

    # request
    log.info('Request to %s. Data: %s' % (url, params))
    response = requests.request(method, url, data=json.dumps(params), headers=headers, auth=auth)
    response.raise_for_status()

    # response
    log.info('Response from %s: %s' % (url, response.text))
    content = response.json()
    self.parse_status(content.get('status'))
    return content
[ "def", "request", "(", "self", ",", "method", ",", "resource", ",", "params", "=", "None", ")", ":", "url", "=", "self", ".", "get_url", "(", "resource", ",", "params", ")", "# headers", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "auth", "=", "requests", ".", "auth", ".", "HTTPBasicAuth", "(", "self", ".", "username", ",", "self", ".", "password", ")", "# request", "log", ".", "info", "(", "'Request to %s. Data: %s'", "%", "(", "url", ",", "params", ")", ")", "response", "=", "requests", ".", "request", "(", "method", ",", "url", ",", "data", "=", "json", ".", "dumps", "(", "params", ")", ",", "headers", "=", "headers", ",", "auth", "=", "auth", ")", "response", ".", "raise_for_status", "(", ")", "# response", "log", ".", "info", "(", "'Response from %s: %s'", "%", "(", "url", ",", "response", ".", "text", ")", ")", "content", "=", "response", ".", "json", "(", ")", "self", ".", "parse_status", "(", "content", ".", "get", "(", "'status'", ")", ")", "return", "content" ]
Make request to the server and parse response
[ "Make", "request", "to", "the", "server", "and", "parse", "response" ]
930277623fc7b142ae9365a44f15a3a7b79bd974
https://github.com/akolpakov/paynova-api-python-client/blob/930277623fc7b142ae9365a44f15a3a7b79bd974/paynova_api_python_client/paynova.py#L62-L91
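Stripped of logging and status parsing, request reduces to an authenticated JSON call with requests; this standalone equivalent uses placeholder credentials and a hypothetical resource url:

import json
import requests

url = 'https://testapi.paynova.com/api/orders/create'  # hypothetical
auth = requests.auth.HTTPBasicAuth('username', 'password')
response = requests.request('POST', url, data=json.dumps({'amount': 100}),
                            headers={'Content-Type': 'application/json'}, auth=auth)
response.raise_for_status()
content = response.json()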
244,727
delfick/aws_syncr
aws_syncr/amazon/iam.py
Iam.modify_attached_policies
def modify_attached_policies(self, role_name, new_policies):
    """Make sure this role has just the new policies"""
    parts = role_name.split('/', 1)
    if len(parts) == 2:
        prefix, name = parts
        prefix = "/{0}/".format(prefix)
    else:
        prefix = "/"
        name = parts[0]

    current_attached_policies = []
    with self.ignore_missing():
        current_attached_policies = self.client.list_attached_role_policies(RoleName=name)
        current_attached_policies = [p['PolicyArn'] for p in current_attached_policies["AttachedPolicies"]]

    new_attached_policies = ["arn:aws:iam::aws:policy/{0}".format(p) for p in new_policies]

    changes = list(Differ.compare_two_documents(current_attached_policies, new_attached_policies))
    if changes:
        with self.catch_boto_400("Couldn't modify attached policies", role=role_name):
            for policy in new_attached_policies:
                if policy not in current_attached_policies:
                    for _ in self.change("+", "attached_policy", role=role_name, policy=policy):
                        self.client.attach_role_policy(RoleName=name, PolicyArn=policy)

            for policy in current_attached_policies:
                if policy not in new_attached_policies:
                    for _ in self.change("-", "attached_policy", role=role_name, changes=changes, policy=policy):
                        self.client.detach_role_policy(RoleName=name, PolicyArn=policy)
python
def modify_attached_policies(self, role_name, new_policies):
    """Make sure this role has just the new policies"""
    parts = role_name.split('/', 1)
    if len(parts) == 2:
        prefix, name = parts
        prefix = "/{0}/".format(prefix)
    else:
        prefix = "/"
        name = parts[0]

    current_attached_policies = []
    with self.ignore_missing():
        current_attached_policies = self.client.list_attached_role_policies(RoleName=name)
        current_attached_policies = [p['PolicyArn'] for p in current_attached_policies["AttachedPolicies"]]

    new_attached_policies = ["arn:aws:iam::aws:policy/{0}".format(p) for p in new_policies]

    changes = list(Differ.compare_two_documents(current_attached_policies, new_attached_policies))
    if changes:
        with self.catch_boto_400("Couldn't modify attached policies", role=role_name):
            for policy in new_attached_policies:
                if policy not in current_attached_policies:
                    for _ in self.change("+", "attached_policy", role=role_name, policy=policy):
                        self.client.attach_role_policy(RoleName=name, PolicyArn=policy)

            for policy in current_attached_policies:
                if policy not in new_attached_policies:
                    for _ in self.change("-", "attached_policy", role=role_name, changes=changes, policy=policy):
                        self.client.detach_role_policy(RoleName=name, PolicyArn=policy)
[ "def", "modify_attached_policies", "(", "self", ",", "role_name", ",", "new_policies", ")", ":", "parts", "=", "role_name", ".", "split", "(", "'/'", ",", "1", ")", "if", "len", "(", "parts", ")", "==", "2", ":", "prefix", ",", "name", "=", "parts", "prefix", "=", "\"/{0}/\"", ".", "format", "(", "prefix", ")", "else", ":", "prefix", "=", "\"/\"", "name", "=", "parts", "[", "0", "]", "current_attached_policies", "=", "[", "]", "with", "self", ".", "ignore_missing", "(", ")", ":", "current_attached_policies", "=", "self", ".", "client", ".", "list_attached_role_policies", "(", "RoleName", "=", "name", ")", "current_attached_policies", "=", "[", "p", "[", "'PolicyArn'", "]", "for", "p", "in", "current_attached_policies", "[", "\"AttachedPolicies\"", "]", "]", "new_attached_policies", "=", "[", "\"arn:aws:iam::aws:policy/{0}\"", ".", "format", "(", "p", ")", "for", "p", "in", "new_policies", "]", "changes", "=", "list", "(", "Differ", ".", "compare_two_documents", "(", "current_attached_policies", ",", "new_attached_policies", ")", ")", "if", "changes", ":", "with", "self", ".", "catch_boto_400", "(", "\"Couldn't modify attached policies\"", ",", "role", "=", "role_name", ")", ":", "for", "policy", "in", "new_attached_policies", ":", "if", "policy", "not", "in", "current_attached_policies", ":", "for", "_", "in", "self", ".", "change", "(", "\"+\"", ",", "\"attached_policy\"", ",", "role", "=", "role_name", ",", "policy", "=", "policy", ")", ":", "self", ".", "client", ".", "attach_role_policy", "(", "RoleName", "=", "name", ",", "PolicyArn", "=", "policy", ")", "for", "policy", "in", "current_attached_policies", ":", "if", "policy", "not", "in", "new_attached_policies", ":", "for", "_", "in", "self", ".", "change", "(", "\"-\"", ",", "\"attached_policy\"", ",", "role", "=", "role_name", ",", "changes", "=", "changes", ",", "policy", "=", "policy", ")", ":", "self", ".", "client", ".", "detach_role_policy", "(", "RoleName", "=", "name", ",", "PolicyArn", "=", "policy", ")" ]
Make sure this role has just the new policies
[ "Make", "sure", "this", "role", "has", "just", "the", "new", "policies" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/amazon/iam.py#L128-L156
244,728
delfick/aws_syncr
aws_syncr/amazon/iam.py
Iam.assume_role_credentials
def assume_role_credentials(self, arn):
    """Return the environment variables for an assumed role"""
    log.info("Assuming role as %s", arn)

    # Clear out empty values
    for name in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']:
        if name in os.environ and not os.environ[name]:
            del os.environ[name]

    sts = self.amazon.session.client("sts")
    with self.catch_boto_400("Couldn't assume role", arn=arn):
        creds = sts.assume_role(RoleArn=arn, RoleSessionName="aws_syncr")

    return {
          'AWS_ACCESS_KEY_ID': creds["Credentials"]["AccessKeyId"]
        , 'AWS_SECRET_ACCESS_KEY': creds["Credentials"]["SecretAccessKey"]
        , 'AWS_SECURITY_TOKEN': creds["Credentials"]["SessionToken"]
        , 'AWS_SESSION_TOKEN': creds["Credentials"]["SessionToken"]
        }
python
def assume_role_credentials(self, arn):
    """Return the environment variables for an assumed role"""
    log.info("Assuming role as %s", arn)

    # Clear out empty values
    for name in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']:
        if name in os.environ and not os.environ[name]:
            del os.environ[name]

    sts = self.amazon.session.client("sts")
    with self.catch_boto_400("Couldn't assume role", arn=arn):
        creds = sts.assume_role(RoleArn=arn, RoleSessionName="aws_syncr")

    return {
          'AWS_ACCESS_KEY_ID': creds["Credentials"]["AccessKeyId"]
        , 'AWS_SECRET_ACCESS_KEY': creds["Credentials"]["SecretAccessKey"]
        , 'AWS_SECURITY_TOKEN': creds["Credentials"]["SessionToken"]
        , 'AWS_SESSION_TOKEN': creds["Credentials"]["SessionToken"]
        }
[ "def", "assume_role_credentials", "(", "self", ",", "arn", ")", ":", "log", ".", "info", "(", "\"Assuming role as %s\"", ",", "arn", ")", "# Clear out empty values", "for", "name", "in", "[", "'AWS_ACCESS_KEY_ID'", ",", "'AWS_SECRET_ACCESS_KEY'", ",", "'AWS_SECURITY_TOKEN'", ",", "'AWS_SESSION_TOKEN'", "]", ":", "if", "name", "in", "os", ".", "environ", "and", "not", "os", ".", "environ", "[", "name", "]", ":", "del", "os", ".", "environ", "[", "name", "]", "sts", "=", "self", ".", "amazon", ".", "session", ".", "client", "(", "\"sts\"", ")", "with", "self", ".", "catch_boto_400", "(", "\"Couldn't assume role\"", ",", "arn", "=", "arn", ")", ":", "creds", "=", "sts", ".", "assume_role", "(", "RoleArn", "=", "arn", ",", "RoleSessionName", "=", "\"aws_syncr\"", ")", "return", "{", "'AWS_ACCESS_KEY_ID'", ":", "creds", "[", "\"Credentials\"", "]", "[", "\"AccessKeyId\"", "]", ",", "'AWS_SECRET_ACCESS_KEY'", ":", "creds", "[", "\"Credentials\"", "]", "[", "\"SecretAccessKey\"", "]", ",", "'AWS_SECURITY_TOKEN'", ":", "creds", "[", "\"Credentials\"", "]", "[", "\"SessionToken\"", "]", ",", "'AWS_SESSION_TOKEN'", ":", "creds", "[", "\"Credentials\"", "]", "[", "\"SessionToken\"", "]", "}" ]
Return the environment variables for an assumed role
[ "Return", "the", "environment", "variables", "for", "an", "assumed", "role" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/amazon/iam.py#L158-L176
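Because assume_role_credentials returns a plain dict keyed by environment variable names, one likely use (sketched here with a placeholder ARN and an assumed `iam` instance of the class above) is exporting the credentials to a child process:

import os
import subprocess

creds = iam.assume_role_credentials('arn:aws:iam::123456789012:role/deploy')
subprocess.check_call(['aws', 's3', 'ls'], env=dict(os.environ, **creds))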
244,729
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient._validate_token
def _validate_token(self):

    ''' a method to validate active access token '''

    title = '%s._validate_token' % self.__class__.__name__

    # construct access token url
    import requests
    url = 'https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' % self.access_token

    # retrieve access token details
    try:
        token_details = requests.get(url).json()
    except:
        raise DriveConnectionError(title)
    if 'error' in token_details.keys():
        raise ValueError('access_token for google drive account is %s' % token_details['error_description'])

    # determine collection space
    # https://developers.google.com/drive/v3/web/about-organization
    if 'scope' in token_details.keys():
        service_scope = token_details['scope']
        if service_scope.find('drive.appfolder') > -1:
            self.drive_space = 'appDataFolder'
            if not self.collection_name:
                self.collection_name = 'App Data Folder'
        elif service_scope.find('drive.photos.readonly') > -1:
            self.drive_space = 'photos'
            if not self.collection_name:
                self.collection_name = 'Photos'

        # determine permissions
        # https://developers.google.com/drive/v3/web/about-auth
        if service_scope.find('readonly') > -1:
            self.permissions_write = False
        if service_scope.find('readonly.metadata') > -1:
            self.permissions_content = False

    # TODO refresh token
    if 'expires_in' in token_details.keys():
        from time import time
        expiration_date = time() + token_details['expires_in']
    if 'issued_to' in token_details.keys():
        client_id = token_details['issued_to']

    return token_details
python
def _validate_token(self):

    ''' a method to validate active access token '''

    title = '%s._validate_token' % self.__class__.__name__

    # construct access token url
    import requests
    url = 'https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' % self.access_token

    # retrieve access token details
    try:
        token_details = requests.get(url).json()
    except:
        raise DriveConnectionError(title)
    if 'error' in token_details.keys():
        raise ValueError('access_token for google drive account is %s' % token_details['error_description'])

    # determine collection space
    # https://developers.google.com/drive/v3/web/about-organization
    if 'scope' in token_details.keys():
        service_scope = token_details['scope']
        if service_scope.find('drive.appfolder') > -1:
            self.drive_space = 'appDataFolder'
            if not self.collection_name:
                self.collection_name = 'App Data Folder'
        elif service_scope.find('drive.photos.readonly') > -1:
            self.drive_space = 'photos'
            if not self.collection_name:
                self.collection_name = 'Photos'

        # determine permissions
        # https://developers.google.com/drive/v3/web/about-auth
        if service_scope.find('readonly') > -1:
            self.permissions_write = False
        if service_scope.find('readonly.metadata') > -1:
            self.permissions_content = False

    # TODO refresh token
    if 'expires_in' in token_details.keys():
        from time import time
        expiration_date = time() + token_details['expires_in']
    if 'issued_to' in token_details.keys():
        client_id = token_details['issued_to']

    return token_details
[ "def", "_validate_token", "(", "self", ")", ":", "title", "=", "'%s._validate_token'", "%", "self", ".", "__class__", ".", "__name__", "# construct access token url", "import", "requests", "url", "=", "'https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'", "%", "self", ".", "access_token", "# retrieve access token details", "try", ":", "token_details", "=", "requests", ".", "get", "(", "url", ")", ".", "json", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "if", "'error'", "in", "token_details", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'access_token for google drive account is %s'", "%", "token_details", "[", "'error_description'", "]", ")", "# determine collection space", "# https://developers.google.com/drive/v3/web/about-organization", "if", "'scope'", "in", "token_details", ".", "keys", "(", ")", ":", "service_scope", "=", "token_details", "[", "'scope'", "]", "if", "service_scope", ".", "find", "(", "'drive.appfolder'", ")", ">", "-", "1", ":", "self", ".", "drive_space", "=", "'appDataFolder'", "if", "not", "self", ".", "collection_name", ":", "self", ".", "collection_name", "=", "'App Data Folder'", "elif", "service_scope", ".", "find", "(", "'drive.photos.readonly'", ")", ">", "-", "1", ":", "self", ".", "drive_space", "=", "'photos'", "if", "not", "self", ".", "collection_name", ":", "self", ".", "collection_name", "=", "'Photos'", "# determine permissions", "# https://developers.google.com/drive/v3/web/about-auth", "if", "service_scope", ".", "find", "(", "'readonly'", ")", ">", "-", "1", ":", "self", ".", "permissions_write", "=", "False", "if", "service_scope", ".", "find", "(", "'readonly.metadata'", ")", ">", "-", "1", ":", "self", ".", "permissions_content", "=", "False", "# TODO refresh token", "if", "'expires_in'", "in", "token_details", ".", "keys", "(", ")", ":", "from", "time", "import", "time", "expiration_date", "=", "time", "(", ")", "+", "token_details", "[", "'expires_in'", "]", "if", "'issued_to'", "in", "token_details", ".", "keys", "(", ")", ":", "client_id", "=", "token_details", "[", "'issued_to'", "]", "return", "token_details" ]
a method to validate active access token
[ "a", "method", "to", "validate", "active", "access", "token" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L189-L234
244,730
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient._get_id
def _get_id(self, file_path):

    ''' a helper method for retrieving id of file or folder '''

    title = '%s._get_id' % self.__class__.__name__

    # construct request kwargs
    list_kwargs = {
        'spaces': self.drive_space,
        'fields': 'files(id, parents)'
    }

    # determine path segments
    path_segments = file_path.split(os.sep)

    # walk down parents to file name
    parent_id = ''
    empty_string = ''
    while path_segments:
        walk_query = "name = '%s'" % path_segments.pop(0)
        if parent_id:
            # leading space added; without it the two clauses run together
            # and the drive query string is malformed
            walk_query += " and '%s' in parents" % parent_id
        list_kwargs['q'] = walk_query
        try:
            response = self.drive.list(**list_kwargs).execute()
        except:
            raise DriveConnectionError(title)
        file_list = response.get('files', [])
        if file_list:
            if path_segments:
                parent_id = file_list[0].get('id')
            else:
                file_id = file_list[0].get('id')
                return file_id, parent_id
        else:
            return empty_string, empty_string
python
def _get_id(self, file_path):

    ''' a helper method for retrieving id of file or folder '''

    title = '%s._get_id' % self.__class__.__name__

    # construct request kwargs
    list_kwargs = {
        'spaces': self.drive_space,
        'fields': 'files(id, parents)'
    }

    # determine path segments
    path_segments = file_path.split(os.sep)

    # walk down parents to file name
    parent_id = ''
    empty_string = ''
    while path_segments:
        walk_query = "name = '%s'" % path_segments.pop(0)
        if parent_id:
            # leading space added; without it the two clauses run together
            # and the drive query string is malformed
            walk_query += " and '%s' in parents" % parent_id
        list_kwargs['q'] = walk_query
        try:
            response = self.drive.list(**list_kwargs).execute()
        except:
            raise DriveConnectionError(title)
        file_list = response.get('files', [])
        if file_list:
            if path_segments:
                parent_id = file_list[0].get('id')
            else:
                file_id = file_list[0].get('id')
                return file_id, parent_id
        else:
            return empty_string, empty_string
[ "def", "_get_id", "(", "self", ",", "file_path", ")", ":", "title", "=", "'%s._get_id'", "%", "self", ".", "__class__", ".", "__name__", "# construct request kwargs", "list_kwargs", "=", "{", "'spaces'", ":", "self", ".", "drive_space", ",", "'fields'", ":", "'files(id, parents)'", "}", "# determine path segments", "path_segments", "=", "file_path", ".", "split", "(", "os", ".", "sep", ")", "# walk down parents to file name", "parent_id", "=", "''", "empty_string", "=", "''", "while", "path_segments", ":", "walk_query", "=", "\"name = '%s'\"", "%", "path_segments", ".", "pop", "(", "0", ")", "if", "parent_id", ":", "walk_query", "+=", "\"and '%s' in parents\"", "%", "parent_id", "list_kwargs", "[", "'q'", "]", "=", "walk_query", "try", ":", "response", "=", "self", ".", "drive", ".", "list", "(", "*", "*", "list_kwargs", ")", ".", "execute", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "file_list", "=", "response", ".", "get", "(", "'files'", ",", "[", "]", ")", "if", "file_list", ":", "if", "path_segments", ":", "parent_id", "=", "file_list", "[", "0", "]", ".", "get", "(", "'id'", ")", "else", ":", "file_id", "=", "file_list", "[", "0", "]", ".", "get", "(", "'id'", ")", "return", "file_id", ",", "parent_id", "else", ":", "return", "empty_string", ",", "empty_string" ]
a helper method for retrieving id of file or folder
[ "a", "helper", "method", "for", "retrieving", "id", "of", "file", "or", "folder" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L236-L271
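_get_id resolves one path segment per request and returns empty strings when the path is missing, so a caller can test existence and fetch the parent folder id in one call; drive_client and the record key here are hypothetical:

file_id, parent_id = drive_client._get_id('lab/unittests/1473719695.json')
if not file_id:
    print('record does not exist')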
244,731
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient._get_space
def _get_space(self):

    ''' a helper method to retrieve id of drive space '''

    title = '%s._space_id' % self.__class__.__name__

    list_kwargs = {
        'q': "'%s' in parents" % self.drive_space,
        'spaces': self.drive_space,
        'fields': 'files(name, parents)',
        'pageSize': 1
    }

    try:
        response = self.drive.list(**list_kwargs).execute()
    except:
        raise DriveConnectionError(title)

    for file in response.get('files', []):
        self.space_id = file.get('parents')[0]
        break

    return self.space_id
python
def _get_space(self):

    ''' a helper method to retrieve id of drive space '''

    title = '%s._space_id' % self.__class__.__name__

    list_kwargs = {
        'q': "'%s' in parents" % self.drive_space,
        'spaces': self.drive_space,
        'fields': 'files(name, parents)',
        'pageSize': 1
    }

    try:
        response = self.drive.list(**list_kwargs).execute()
    except:
        raise DriveConnectionError(title)

    for file in response.get('files', []):
        self.space_id = file.get('parents')[0]
        break

    return self.space_id
[ "def", "_get_space", "(", "self", ")", ":", "title", "=", "'%s._space_id'", "%", "self", ".", "__class__", ".", "__name__", "list_kwargs", "=", "{", "'q'", ":", "\"'%s' in parents\"", "%", "self", ".", "drive_space", ",", "'spaces'", ":", "self", ".", "drive_space", ",", "'fields'", ":", "'files(name, parents)'", ",", "'pageSize'", ":", "1", "}", "try", ":", "response", "=", "self", ".", "drive", ".", "list", "(", "*", "*", "list_kwargs", ")", ".", "execute", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "for", "file", "in", "response", ".", "get", "(", "'files'", ",", "[", "]", ")", ":", "self", ".", "space_id", "=", "file", ".", "get", "(", "'parents'", ")", "[", "0", "]", "break", "return", "self", ".", "space_id" ]
a helper method to retrieve id of drive space
[ "a", "helper", "method", "to", "retrieve", "id", "of", "drive", "space" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L273-L291
244,732
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient._get_data
def _get_data(self, file_id):

    ''' a helper method for retrieving the byte data of a file '''

    title = '%s._get_data' % self.__class__.__name__

    # request file data
    try:
        record_data = self.drive.get_media(fileId=file_id).execute()
    except:
        raise DriveConnectionError(title)

    return record_data
python
def _get_data(self, file_id):

    ''' a helper method for retrieving the byte data of a file '''

    title = '%s._get_data' % self.__class__.__name__

    # request file data
    try:
        record_data = self.drive.get_media(fileId=file_id).execute()
    except:
        raise DriveConnectionError(title)

    return record_data
[ "def", "_get_data", "(", "self", ",", "file_id", ")", ":", "title", "=", "'%s._get_data'", "%", "self", ".", "__class__", ".", "__name__", "# request file data", "try", ":", "record_data", "=", "self", ".", "drive", ".", "get_media", "(", "fileId", "=", "file_id", ")", ".", "execute", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "return", "record_data" ]
a helper method for retrieving the byte data of a file
[ "a", "helper", "method", "for", "retrieving", "the", "byte", "data", "of", "a", "file" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L293-L305
244,733
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient._get_metadata
def _get_metadata(self, file_id, metadata_fields=''):

    ''' a helper method for retrieving the metadata of a file '''

    title = '%s._get_metadata' % self.__class__.__name__

    # construct fields arg
    if not metadata_fields:
        metadata_fields = ','.join(self.object_file.keys())
    else:
        field_list = metadata_fields.split(',')
        for field in field_list:
            if not field in self.object_file.keys():
                raise ValueError('%s(metadata_fields="%s") is not a valid drive file field' % (title, field))

    # send request
    try:
        metadata_details = self.drive.get(fileId=file_id, fields=metadata_fields).execute()
    except:
        raise DriveConnectionError(title)

    return metadata_details
python
def _get_metadata(self, file_id, metadata_fields=''):

    ''' a helper method for retrieving the metadata of a file '''

    title = '%s._get_metadata' % self.__class__.__name__

    # construct fields arg
    if not metadata_fields:
        metadata_fields = ','.join(self.object_file.keys())
    else:
        field_list = metadata_fields.split(',')
        for field in field_list:
            if not field in self.object_file.keys():
                raise ValueError('%s(metadata_fields="%s") is not a valid drive file field' % (title, field))

    # send request
    try:
        metadata_details = self.drive.get(fileId=file_id, fields=metadata_fields).execute()
    except:
        raise DriveConnectionError(title)

    return metadata_details
[ "def", "_get_metadata", "(", "self", ",", "file_id", ",", "metadata_fields", "=", "''", ")", ":", "title", "=", "'%s._get_metadata'", "%", "self", ".", "__class__", ".", "__name__", "# construct fields arg", "if", "not", "metadata_fields", ":", "metadata_fields", "=", "','", ".", "join", "(", "self", ".", "object_file", ".", "keys", "(", ")", ")", "else", ":", "field_list", "=", "metadata_fields", ".", "split", "(", "','", ")", "for", "field", "in", "field_list", ":", "if", "not", "field", "in", "self", ".", "object_file", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'%s(metadata_fields=\"%s\") is not a valid drive file field'", "%", "(", "title", ",", "field", ")", ")", "# send request", "try", ":", "metadata_details", "=", "self", ".", "drive", ".", "get", "(", "fileId", "=", "file_id", ",", "fields", "=", "metadata_fields", ")", ".", "execute", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "return", "metadata_details" ]
a helper method for retrieving the metadata of a file
[ "a", "helper", "method", "for", "retrieving", "the", "metadata", "of", "a", "file" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L307-L328
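Since _get_metadata validates each requested field against self.object_file before sending the request, a caller can trim the response to just the fields it needs; this sketch assumes 'id', 'name' and 'mimeType' are among the client's object_file keys:

metadata = drive_client._get_metadata(file_id, metadata_fields='id,name,mimeType')
print(metadata.get('mimeType'))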
244,734
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient._list_directory
def _list_directory(self, folder_id=''):

    ''' a generator method for listing the contents of a directory '''

    title = '%s._list_directory' % self.__class__.__name__

    # construct default response
    file_list = []

    # construct request kwargs
    list_kwargs = {
        'spaces': self.drive_space,
        'fields': 'nextPageToken, files(id, name, parents, mimeType)'
    }

    # add query field for parent
    if folder_id:
        list_kwargs['q'] = "'%s' in parents" % folder_id

    # retrieve space id
    if not self.space_id:
        self._get_space()

    # send request
    page_token = 1
    while page_token:
        try:
            response = self.drive.list(**list_kwargs).execute()
        except:
            raise DriveConnectionError(title)

        # populate list from response
        results = response.get('files', [])
        for file in results:
            if not folder_id and file.get('parents', [])[0] != self.space_id:
                pass
            else:
                yield file.get('id', ''), file.get('name', ''), file.get('mimeType', '')

        # get page token
        page_token = response.get('nextPageToken', None)
        if page_token:
            list_kwargs['pageToken'] = page_token

    return file_list
python
def _list_directory(self, folder_id=''):

    ''' a generator method for listing the contents of a directory '''

    title = '%s._list_directory' % self.__class__.__name__

    # construct default response
    file_list = []

    # construct request kwargs
    list_kwargs = {
        'spaces': self.drive_space,
        'fields': 'nextPageToken, files(id, name, parents, mimeType)'
    }

    # add query field for parent
    if folder_id:
        list_kwargs['q'] = "'%s' in parents" % folder_id

    # retrieve space id
    if not self.space_id:
        self._get_space()

    # send request
    page_token = 1
    while page_token:
        try:
            response = self.drive.list(**list_kwargs).execute()
        except:
            raise DriveConnectionError(title)

        # populate list from response
        results = response.get('files', [])
        for file in results:
            if not folder_id and file.get('parents', [])[0] != self.space_id:
                pass
            else:
                yield file.get('id', ''), file.get('name', ''), file.get('mimeType', '')

        # get page token
        page_token = response.get('nextPageToken', None)
        if page_token:
            list_kwargs['pageToken'] = page_token

    return file_list
[ "def", "_list_directory", "(", "self", ",", "folder_id", "=", "''", ")", ":", "title", "=", "'%s._list_directory'", "%", "self", ".", "__class__", ".", "__name__", "# construct default response", "file_list", "=", "[", "]", "# construct request kwargs", "list_kwargs", "=", "{", "'spaces'", ":", "self", ".", "drive_space", ",", "'fields'", ":", "'nextPageToken, files(id, name, parents, mimeType)'", "}", "# add query field for parent", "if", "folder_id", ":", "list_kwargs", "[", "'q'", "]", "=", "\"'%s' in parents\"", "%", "folder_id", "# retrieve space id", "if", "not", "self", ".", "space_id", ":", "self", ".", "_get_space", "(", ")", "# send request", "page_token", "=", "1", "while", "page_token", ":", "try", ":", "response", "=", "self", ".", "drive", ".", "list", "(", "*", "*", "list_kwargs", ")", ".", "execute", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "# populate list from response", "results", "=", "response", ".", "get", "(", "'files'", ",", "[", "]", ")", "for", "file", "in", "results", ":", "if", "not", "folder_id", "and", "file", ".", "get", "(", "'parents'", ",", "[", "]", ")", "[", "0", "]", "!=", "self", ".", "space_id", ":", "pass", "else", ":", "yield", "file", ".", "get", "(", "'id'", ",", "''", ")", ",", "file", ".", "get", "(", "'name'", ",", "''", ")", ",", "file", ".", "get", "(", "'mimeType'", ",", "''", ")", "# get page token", "page_token", "=", "response", ".", "get", "(", "'nextPageToken'", ",", "None", ")", "if", "page_token", ":", "list_kwargs", "[", "'pageToken'", "]", "=", "page_token", "return", "file_list" ]
a generator method for listing the contents of a directory
[ "a", "generator", "method", "for", "listing", "the", "contents", "of", "a", "directory" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L330-L374
244,735
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient._walk
def _walk(self, root_path='', root_id=''):

    ''' a generator method which walks the file structure of the google drive collection '''

    title = '%s._walk' % self.__class__.__name__

    if root_id:
        pass
    elif root_path:
        root_id, root_parent = self._get_id(root_path)

    for file_id, name, mimetype in self._list_directory(root_id):
        file_path = os.path.join(root_path, name)
        if mimetype == 'application/vnd.google-apps.folder':
            for path, id in self._walk(root_path=file_path, root_id=file_id):
                yield path, id
        else:
            yield file_path, file_id
python
def _walk(self, root_path='', root_id=''):

    ''' a generator method which walks the file structure of the google drive collection '''

    title = '%s._walk' % self.__class__.__name__

    if root_id:
        pass
    elif root_path:
        root_id, root_parent = self._get_id(root_path)

    for file_id, name, mimetype in self._list_directory(root_id):
        file_path = os.path.join(root_path, name)
        if mimetype == 'application/vnd.google-apps.folder':
            for path, id in self._walk(root_path=file_path, root_id=file_id):
                yield path, id
        else:
            yield file_path, file_id
[ "def", "_walk", "(", "self", ",", "root_path", "=", "''", ",", "root_id", "=", "''", ")", ":", "title", "=", "'%s._walk'", "%", "self", ".", "__class__", ".", "__name__", "if", "root_id", ":", "pass", "elif", "root_path", ":", "root_id", ",", "root_parent", "=", "self", ".", "_get_id", "(", "root_path", ")", "for", "file_id", ",", "name", ",", "mimetype", "in", "self", ".", "_list_directory", "(", "root_id", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "root_path", ",", "name", ")", "if", "mimetype", "==", "'application/vnd.google-apps.folder'", ":", "for", "path", ",", "id", "in", "self", ".", "_walk", "(", "root_path", "=", "file_path", ",", "root_id", "=", "file_id", ")", ":", "yield", "path", ",", "id", "else", ":", "yield", "file_path", ",", "file_id" ]
a generator method which walks the file structure of the google drive collection
[ "a", "generator", "method", "which", "walks", "the", "file", "structure", "of", "the", "dropbox", "collection" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L376-L392
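_walk recurses whenever the mimeType marks a Google Drive folder, yielding (relative path, file id) pairs, so enumerating an entire collection is a single loop; drive_client is assumed to be an authorized driveClient instance:

for file_path, file_id in drive_client._walk():
    print(file_path, file_id)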
244,736
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient.load
def load(self, record_key, secret_key=''):

    ''' a method to retrieve byte data of appdata record

    :param record_key: string with name of record
    :param secret_key: [optional] string used to decrypt data
    :return: byte data for record body
    '''

    title = '%s.load' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'record_key': record_key,
        'secret_key': secret_key
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # verify permissions
    if not self.permissions_content:
        raise Exception('%s requires an access_token with file content permissions.' % title)

    # retrieve file id
    file_id, parent_id = self._get_id(record_key)
    if not file_id:
        raise Exception('%s(record_key=%s) does not exist.' % (title, record_key))

    # request file data
    try:
        record_data = self.drive.get_media(fileId=file_id).execute()
    except:
        raise DriveConnectionError(title)

    # retrieve data from response
    # import io
    # from googleapiclient.http import MediaIoBaseDownload
    # file_header = io.BytesIO
    # record_data = MediaIoBaseDownload(file_header, response)
    # done = False
    # while not done:
    #     status, done = record_data.next_chunk()

    # TODO export google document to other format

    # decrypt (if necessary)
    if secret_key:
        from labpack.encryption import cryptolab
        record_data = cryptolab.decrypt(record_data, secret_key)

    return record_data
python
def load(self, record_key, secret_key=''):

    ''' a method to retrieve byte data of appdata record

    :param record_key: string with name of record
    :param secret_key: [optional] string used to decrypt data
    :return: byte data for record body
    '''

    title = '%s.load' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'record_key': record_key,
        'secret_key': secret_key
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # verify permissions
    if not self.permissions_content:
        raise Exception('%s requires an access_token with file content permissions.' % title)

    # retrieve file id
    file_id, parent_id = self._get_id(record_key)
    if not file_id:
        raise Exception('%s(record_key=%s) does not exist.' % (title, record_key))

    # request file data
    try:
        record_data = self.drive.get_media(fileId=file_id).execute()
    except:
        raise DriveConnectionError(title)

    # retrieve data from response
    # import io
    # from googleapiclient.http import MediaIoBaseDownload
    # file_header = io.BytesIO
    # record_data = MediaIoBaseDownload(file_header, response)
    # done = False
    # while not done:
    #     status, done = record_data.next_chunk()

    # TODO export google document to other format

    # decrypt (if necessary)
    if secret_key:
        from labpack.encryption import cryptolab
        record_data = cryptolab.decrypt(record_data, secret_key)

    return record_data
[ "def", "load", "(", "self", ",", "record_key", ",", "secret_key", "=", "''", ")", ":", "title", "=", "'%s.load'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'record_key'", ":", "record_key", ",", "'secret_key'", ":", "secret_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# verify permissions", "if", "not", "self", ".", "permissions_content", ":", "raise", "Exception", "(", "'%s requires an access_token with file content permissions.'", "%", "title", ")", "# retrieve file id", "file_id", ",", "parent_id", "=", "self", ".", "_get_id", "(", "record_key", ")", "if", "not", "file_id", ":", "raise", "Exception", "(", "'%s(record_key=%s) does not exist.'", "%", "(", "title", ",", "record_key", ")", ")", "# request file data", "try", ":", "record_data", "=", "self", ".", "drive", ".", "get_media", "(", "fileId", "=", "file_id", ")", ".", "execute", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "# retrieve data from response", "# import io", "# from googleapiclient.http import MediaIoBaseDownload", "# file_header = io.BytesIO", "# record_data = MediaIoBaseDownload(file_header, response)", "# done = False", "# while not done:", "# status, done = record_data.next_chunk()", "# TODO export google document to other format", "# decrypt (if necessary)", "if", "secret_key", ":", "from", "labpack", ".", "encryption", "import", "cryptolab", "record_data", "=", "cryptolab", ".", "decrypt", "(", "record_data", ",", "secret_key", ")", "return", "record_data" ]
a method to retrieve byte data of appdata record

:param record_key: string with name of record
:param secret_key: [optional] string used to decrypt data
:return: byte data for record body
[ "a", "method", "to", "retrieve", "byte", "data", "of", "appdata", "record" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L727-L780
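load returns raw bytes (decrypted first when a secret_key is supplied), so decoding is left to the caller; a hypothetical round trip for a JSON record:

import json

record_data = drive_client.load('lab/unittests/1473719695.json')  # key is illustrative
record = json.loads(record_data.decode())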
244,737
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient.list
def list(self, prefix='', delimiter='', filter_function=None, max_results=1, previous_key=''):

    ''' a method to list keys in the google drive collection

    :param prefix: string with prefix value to filter results
    :param delimiter: string with value which results must not contain (after prefix)
    :param filter_function: (positional arguments) function used to filter results
    :param max_results: integer with maximum number of results to return
    :param previous_key: string with key in collection to begin search after
    :return: list of key strings

    NOTE:   each key string can be divided into one or more segments
            based upon the / characters which occur in the key string as
            well as its file extension type. if the key string represents
            a file path, then each directory in the path, the file name
            and the file extension are all separate indexed values.

            eg. lab/unittests/1473719695.2165067.json is indexed:
            [ 'lab', 'unittests', '1473719695.2165067', '.json' ]

            it is possible to filter the records in the collection according
            to one or more of these path segments using a filter_function.

    NOTE:   the filter_function must be able to accept an array of positional
            arguments and return a value that can evaluate to true or false.
            while searching the records, list produces an array of strings
            which represent the directory structure in relative path of each
            key string. if a filter_function is provided, this list of strings
            is fed to the filter function. if the function evaluates this input
            and returns a true value the file will be included in the list
            results.
    '''

    title = '%s.list' % self.__class__.__name__

    # validate input
    input_fields = {
        'prefix': prefix,
        'delimiter': delimiter,
        'max_results': max_results,
        'previous_key': previous_key
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # validate filter function
    if filter_function:
        try:
            path_segments = [ 'lab', 'unittests', '1473719695.2165067', '.json' ]
            filter_function(*path_segments)
        except:
            err_msg = '%s(filter_function=%s)' % (title, filter_function.__class__.__name__)
            raise TypeError('%s must accept positional arguments.' % err_msg)

    # construct empty results list
    results_list = []
    check_key = True
    if previous_key:
        check_key = False

    # determine root path
    root_path = ''
    if prefix:
        from os import path
        root_path, file_name = path.split(prefix)

    # iterate over drive files
    for file_path, file_id in self._walk(root_path):
        path_segments = file_path.split(os.sep)
        record_key = os.path.join(*path_segments)
        record_key = record_key.replace('\\', '/')
        if record_key == previous_key:
            check_key = True

        # find starting point
        if not check_key:
            continue

        # apply prefix filter
        partial_key = record_key
        if prefix:
            if record_key.find(prefix) == 0:
                partial_key = record_key[len(prefix):]
            else:
                continue

        # apply delimiter filter
        if delimiter:
            if partial_key.find(delimiter) > -1:
                continue

        # apply filter function
        if filter_function:
            if filter_function(*path_segments):
                results_list.append(record_key)
        else:
            results_list.append(record_key)

        # return results list
        if len(results_list) == max_results:
            return results_list

    return results_list
python
def list(self, prefix='', delimiter='', filter_function=None, max_results=1, previous_key=''):

    ''' a method to list keys in the google drive collection

    :param prefix: string with prefix value to filter results
    :param delimiter: string with value which results must not contain (after prefix)
    :param filter_function: (positional arguments) function used to filter results
    :param max_results: integer with maximum number of results to return
    :param previous_key: string with key in collection to begin search after
    :return: list of key strings

    NOTE:   each key string can be divided into one or more segments
            based upon the / characters which occur in the key string as
            well as its file extension type. if the key string represents
            a file path, then each directory in the path, the file name
            and the file extension are all separate indexed values.

            eg. lab/unittests/1473719695.2165067.json is indexed:
            [ 'lab', 'unittests', '1473719695.2165067', '.json' ]

            it is possible to filter the records in the collection according
            to one or more of these path segments using a filter_function.

    NOTE:   the filter_function must be able to accept an array of positional
            arguments and return a value that can evaluate to true or false.
            while searching the records, list produces an array of strings
            which represent the directory structure in relative path of each
            key string. if a filter_function is provided, this list of strings
            is fed to the filter function. if the function evaluates this input
            and returns a true value the file will be included in the list
            results.
    '''

    title = '%s.list' % self.__class__.__name__

    # validate input
    input_fields = {
        'prefix': prefix,
        'delimiter': delimiter,
        'max_results': max_results,
        'previous_key': previous_key
    }
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # validate filter function
    if filter_function:
        try:
            path_segments = [ 'lab', 'unittests', '1473719695.2165067', '.json' ]
            filter_function(*path_segments)
        except:
            err_msg = '%s(filter_function=%s)' % (title, filter_function.__class__.__name__)
            raise TypeError('%s must accept positional arguments.' % err_msg)

    # construct empty results list
    results_list = []
    check_key = True
    if previous_key:
        check_key = False

    # determine root path
    root_path = ''
    if prefix:
        from os import path
        root_path, file_name = path.split(prefix)

    # iterate over drive files
    for file_path, file_id in self._walk(root_path):
        path_segments = file_path.split(os.sep)
        record_key = os.path.join(*path_segments)
        record_key = record_key.replace('\\', '/')
        if record_key == previous_key:
            check_key = True

        # find starting point
        if not check_key:
            continue

        # apply prefix filter
        partial_key = record_key
        if prefix:
            if record_key.find(prefix) == 0:
                partial_key = record_key[len(prefix):]
            else:
                continue

        # apply delimiter filter
        if delimiter:
            if partial_key.find(delimiter) > -1:
                continue

        # apply filter function
        if filter_function:
            if filter_function(*path_segments):
                results_list.append(record_key)
        else:
            results_list.append(record_key)

        # return results list
        if len(results_list) == max_results:
            return results_list

    return results_list
[ "def", "list", "(", "self", ",", "prefix", "=", "''", ",", "delimiter", "=", "''", ",", "filter_function", "=", "None", ",", "max_results", "=", "1", ",", "previous_key", "=", "''", ")", ":", "title", "=", "'%s.list'", "%", "self", ".", "__class__", ".", "__name__", "# validate input", "input_fields", "=", "{", "'prefix'", ":", "prefix", ",", "'delimiter'", ":", "delimiter", ",", "'max_results'", ":", "max_results", ",", "'previous_key'", ":", "previous_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# validate filter function", "if", "filter_function", ":", "try", ":", "path_segments", "=", "[", "'lab'", ",", "'unittests'", ",", "'1473719695.2165067'", ",", "'.json'", "]", "filter_function", "(", "*", "path_segments", ")", "except", ":", "err_msg", "=", "'%s(filter_function=%s)'", "%", "(", "title", ",", "filter_function", ".", "__class__", ".", "__name__", ")", "raise", "TypeError", "(", "'%s must accept positional arguments.'", "%", "err_msg", ")", "# construct empty results list", "results_list", "=", "[", "]", "check_key", "=", "True", "if", "previous_key", ":", "check_key", "=", "False", "# determine root path", "root_path", "=", "''", "if", "prefix", ":", "from", "os", "import", "path", "root_path", ",", "file_name", "=", "path", ".", "split", "(", "prefix", ")", "# iterate over dropbox files", "for", "file_path", ",", "file_id", "in", "self", ".", "_walk", "(", "root_path", ")", ":", "path_segments", "=", "file_path", ".", "split", "(", "os", ".", "sep", ")", "record_key", "=", "os", ".", "path", ".", "join", "(", "*", "path_segments", ")", "record_key", "=", "record_key", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "if", "record_key", "==", "previous_key", ":", "check_key", "=", "True", "# find starting point", "if", "not", "check_key", ":", "continue", "# apply prefix filter", "partial_key", "=", "record_key", "if", "prefix", ":", "if", "record_key", ".", "find", "(", "prefix", ")", "==", "0", ":", "partial_key", "=", "record_key", "[", "len", "(", "prefix", ")", ":", "]", "else", ":", "continue", "# apply delimiter filter", "if", "delimiter", ":", "if", "partial_key", ".", "find", "(", "delimiter", ")", ">", "-", "1", ":", "continue", "# apply filter function", "if", "filter_function", ":", "if", "filter_function", "(", "*", "path_segments", ")", ":", "results_list", ".", "append", "(", "record_key", ")", "else", ":", "results_list", ".", "append", "(", "record_key", ")", "# return results list", "if", "len", "(", "results_list", ")", "==", "max_results", ":", "return", "results_list", "return", "results_list" ]
a method to list keys in the google drive collection

:param prefix: string with prefix value to filter results
:param delimiter: string with value which results must not contain (after prefix)
:param filter_function: (positional arguments) function used to filter results
:param max_results: integer with maximum number of results to return
:param previous_key: string with key in collection to begin search after
:return: list of key strings

NOTE:   each key string can be divided into one or more segments
        based upon the / characters which occur in the key string as
        well as its file extension type. if the key string represents
        a file path, then each directory in the path, the file name
        and the file extension are all separate indexed values.

        eg. lab/unittests/1473719695.2165067.json is indexed:
        [ 'lab', 'unittests', '1473719695.2165067', '.json' ]

        it is possible to filter the records in the collection according
        to one or more of these path segments using a filter_function.

NOTE:   the filter_function must be able to accept an array of positional
        arguments and return a value that can evaluate to true or false.
        while searching the records, list produces an array of strings
        which represent the directory structure in relative path of each
        key string. if a filter_function is provided, this list of strings
        is fed to the filter function. if the function evaluates this input
        and returns a true value the file will be included in the list
        results.
[ "a", "method", "to", "list", "keys", "in", "the", "google", "drive", "collection" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L815-L920
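The segment model in the docstring makes filter_function straightforward: it receives each key's path segments as positional arguments and returns a truthy value to keep the key. A sketch (following the docstring's example segmentation) that keeps only .json records under a unittests directory:

def unittest_json(*segments):
    return 'unittests' in segments and segments[-1] == '.json'

keys = drive_client.list(filter_function=unittest_json, max_results=10)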
244,738
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient.delete
def delete(self, record_key):

    ''' a method to delete a file

    :param record_key: string with name of file
    :return: string reporting outcome
    '''

    title = '%s.delete' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'record_key': record_key
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # validate existence of file
    file_id, parent_id = self._get_id(record_key)
    if not file_id:
        exit_msg = '%s does not exist.' % record_key
        return exit_msg

    # remove file
    try:
        self.drive.delete(fileId=file_id).execute()
    except:
        raise DriveConnectionError(title)

    # determine file directory
    current_dir = os.path.split(record_key)[0]

    # remove empty parent folders
    try:
        while current_dir:
            folder_id, parent_id = self._get_id(current_dir)
            count = 0
            for id, name, mimetype in self._list_directory(folder_id):
                count += 1
                break
            # condition inverted from the original `if count:`, which would
            # have deleted non-empty folders; only an empty folder is removed
            if not count:
                self.drive.delete(fileId=folder_id).execute()
                current_dir = os.path.split(current_dir)[0]
            else:
                break
    except:
        raise DriveConnectionError(title)

    # return exit message
    exit_msg = '%s has been deleted.' % record_key
    return exit_msg
python
def delete(self, record_key):

    ''' a method to delete a file

    :param record_key: string with name of file
    :return: string reporting outcome
    '''

    title = '%s.delete' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'record_key': record_key
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # validate existence of file
    file_id, parent_id = self._get_id(record_key)
    if not file_id:
        exit_msg = '%s does not exist.' % record_key
        return exit_msg

    # remove file
    try:
        self.drive.delete(fileId=file_id).execute()
    except:
        raise DriveConnectionError(title)

    # determine file directory
    current_dir = os.path.split(record_key)[0]

    # remove empty parent folders
    try:
        while current_dir:
            folder_id, parent_id = self._get_id(current_dir)
            count = 0
            for id, name, mimetype in self._list_directory(folder_id):
                count += 1
                break
            # condition inverted from the original `if count:`, which would
            # have deleted non-empty folders; only an empty folder is removed
            if not count:
                self.drive.delete(fileId=folder_id).execute()
                current_dir = os.path.split(current_dir)[0]
            else:
                break
    except:
        raise DriveConnectionError(title)

    # return exit message
    exit_msg = '%s has been deleted.' % record_key
    return exit_msg
[ "def", "delete", "(", "self", ",", "record_key", ")", ":", "title", "=", "'%s.delete'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'record_key'", ":", "record_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# validate existence of file", "file_id", ",", "parent_id", "=", "self", ".", "_get_id", "(", "record_key", ")", "if", "not", "file_id", ":", "exit_msg", "=", "'%s does not exist.'", "%", "record_key", "return", "exit_msg", "# remove file", "try", ":", "self", ".", "drive", ".", "delete", "(", "fileId", "=", "file_id", ")", ".", "execute", "(", ")", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "# determine file directory", "current_dir", "=", "os", ".", "path", ".", "split", "(", "record_key", ")", "[", "0", "]", "# remove empty parent folders", "try", ":", "while", "current_dir", ":", "folder_id", ",", "parent_id", "=", "self", ".", "_get_id", "(", "current_dir", ")", "count", "=", "0", "for", "id", ",", "name", ",", "mimetype", "in", "self", ".", "_list_directory", "(", "folder_id", ")", ":", "count", "+=", "1", "break", "if", "count", ":", "self", ".", "drive", ".", "delete", "(", "fileId", "=", "folder_id", ")", ".", "execute", "(", ")", "current_dir", "=", "os", ".", "path", ".", "split", "(", "current_dir", ")", "[", "0", "]", "else", ":", "break", "except", ":", "raise", "DriveConnectionError", "(", "title", ")", "# return exit message", "exit_msg", "=", "'%s has been deleted.'", "%", "record_key", "return", "exit_msg" ]
a method to delete a file

:param record_key: string with name of file
:return: string reporting outcome
[ "a", "method", "to", "delete", "a", "file" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L922-L973
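delete reports its outcome as a string instead of raising on a missing key, so callers simply inspect or log the message; the record key here is illustrative:

exit_msg = drive_client.delete('lab/unittests/1473719695.json')
print(exit_msg)  # '... has been deleted.' or '... does not exist.'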
244,739
collectiveacuity/labPack
labpack/storage/google/drive.py
driveClient.remove
def remove(self):

    ''' a method to remove all records in the collection

    NOTE:   this method removes all the files in the collection, but the
            collection folder itself created by oauth2 cannot be removed.
            only the user can remove access to the app folder

    :return: string with confirmation of deletion
    '''

    title = '%s.remove' % self.__class__.__name__

    # get contents of root
    for id, name, mimetype in self._list_directory():
        try:
            self.drive.delete(fileId=id).execute()
        except Exception as err:
            if str(err).find('File not found') > -1:
                pass
            else:
                raise DriveConnectionError(title)

    # return outcome
    insert = 'collection'
    if self.collection_name:
        insert = self.collection_name
    exit_msg = 'Contents of %s will be removed from Google Drive.' % insert
    return exit_msg
python
def remove(self): ''' a method to remove all records in the collection NOTE: this method removes all the files in the collection, but the collection folder itself created by oauth2 cannot be removed. only the user can remove access to the app folder :return: string with confirmation of deletion ''' title = '%s.remove' % self.__class__.__name__ # get contents of root for id, name, mimetype in self._list_directory(): try: self.drive.delete(fileId=id).execute() except Exception as err: if str(err).find('File not found') > -1: pass else: raise DriveConnectionError(title) # return outcome insert = 'collection' if self.collection_name: insert = self.collection_name exit_msg = 'Contents of %s will be removed from Google Drive.' % insert return exit_msg
[ "def", "remove", "(", "self", ")", ":", "title", "=", "'%s.remove'", "%", "self", ".", "__class__", ".", "__name__", "# get contents of root", "for", "id", ",", "name", ",", "mimetype", "in", "self", ".", "_list_directory", "(", ")", ":", "try", ":", "self", ".", "drive", ".", "delete", "(", "fileId", "=", "id", ")", ".", "execute", "(", ")", "except", "Exception", "as", "err", ":", "if", "str", "(", "err", ")", ".", "find", "(", "'File not found'", ")", ">", "-", "1", ":", "pass", "else", ":", "raise", "DriveConnectionError", "(", "title", ")", "# return outcome", "insert", "=", "'collection'", "if", "self", ".", "collection_name", ":", "insert", "=", "self", ".", "collection_name", "exit_msg", "=", "'Contents of %s will be removed from Google Drive.'", "%", "insert", "return", "exit_msg" ]
a method to remove all records in the collection NOTE: this method removes all the files in the collection, but the collection folder itself created by oauth2 cannot be removed. only the user can remove access to the app folder :return: string with confirmation of deletion
[ "a", "method", "to", "remove", "all", "records", "in", "the", "collection" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/google/drive.py#L975-L1004
244,740
dsoprea/protobufp
protobufp/read_buffer.py
ReadBuffer.read_message
def read_message(self): """Try to read a message from the buffered data. A message is defined as a 32-bit integer size, followed by that number of bytes. First we try to non- destructively read the integer. Then, we try to non- destructively read the remaining bytes. If both are successful, we then go back to remove the span from the front of the buffers. """ with self.__class__.__locker: result = self.__passive_read(4) if result is None: return None (four_bytes, last_buffer_index, updates1) = result (length,) = unpack('>I', four_bytes) result = self.__passive_read(length, last_buffer_index) if result is None: return None (data, last_buffer_index, updates2) = result # If we get here, we found a message. Remove it from the buffers. for updates in (updates1, updates2): for update in updates: (buffer_index, buffer_, length_consumed) = update self.__buffers[buffer_index] = buffer_ if buffer_ else '' self.__length -= length_consumed self.__read_buffer_index = last_buffer_index self.__hits += 1 if self.__hits >= self.__class__.__cleanup_interval: self.__cleanup() self.__hits = 0 return data
python
def read_message(self): """Try to read a message from the buffered data. A message is defined as a 32-bit integer size, followed by that number of bytes. First we try to non- destructively read the integer. Then, we try to non- destructively read the remaining bytes. If both are successful, we then go back to remove the span from the front of the buffers. """ with self.__class__.__locker: result = self.__passive_read(4) if result is None: return None (four_bytes, last_buffer_index, updates1) = result (length,) = unpack('>I', four_bytes) result = self.__passive_read(length, last_buffer_index) if result is None: return None (data, last_buffer_index, updates2) = result # If we get here, we found a message. Remove it from the buffers. for updates in (updates1, updates2): for update in updates: (buffer_index, buffer_, length_consumed) = update self.__buffers[buffer_index] = buffer_ if buffer_ else '' self.__length -= length_consumed self.__read_buffer_index = last_buffer_index self.__hits += 1 if self.__hits >= self.__class__.__cleanup_interval: self.__cleanup() self.__hits = 0 return data
[ "def", "read_message", "(", "self", ")", ":", "with", "self", ".", "__class__", ".", "__locker", ":", "result", "=", "self", ".", "__passive_read", "(", "4", ")", "if", "result", "is", "None", ":", "return", "None", "(", "four_bytes", ",", "last_buffer_index", ",", "updates1", ")", "=", "result", "(", "length", ",", ")", "=", "unpack", "(", "'>I'", ",", "four_bytes", ")", "result", "=", "self", ".", "__passive_read", "(", "length", ",", "last_buffer_index", ")", "if", "result", "is", "None", ":", "return", "None", "(", "data", ",", "last_buffer_index", ",", "updates2", ")", "=", "result", "# If we get here, we found a message. Remove it from the buffers.", "for", "updates", "in", "(", "updates1", ",", "updates2", ")", ":", "for", "update", "in", "updates", ":", "(", "buffer_index", ",", "buffer_", ",", "length_consumed", ")", "=", "update", "self", ".", "__buffers", "[", "buffer_index", "]", "=", "buffer_", "if", "buffer_", "else", "''", "self", ".", "__length", "-=", "length_consumed", "self", ".", "__read_buffer_index", "=", "last_buffer_index", "self", ".", "__hits", "+=", "1", "if", "self", ".", "__hits", ">=", "self", ".", "__class__", ".", "__cleanup_interval", ":", "self", ".", "__cleanup", "(", ")", "self", ".", "__hits", "=", "0", "return", "data" ]
Try to read a message from the buffered data. A message is defined as a 32-bit integer size, followed by that number of bytes. First we try to non-destructively read the integer. Then, we try to non-destructively read the remaining bytes. If both are successful, we then go back to remove the span from the front of the buffers.
[ "Try", "to", "read", "a", "message", "from", "the", "buffered", "data", ".", "A", "message", "is", "defined", "as", "a", "32", "-", "bit", "integer", "size", "followed", "that", "number", "of", "bytes", ".", "First", "we", "try", "to", "non", "-", "destructively", "read", "the", "integer", ".", "Then", "we", "try", "to", "non", "-", "destructively", "read", "the", "remaining", "bytes", ".", "If", "both", "are", "successful", "we", "then", "go", "back", "to", "remove", "the", "span", "from", "the", "front", "of", "the", "buffers", "." ]
3ba4d583d1002761ad316072bc0259b147584dcd
https://github.com/dsoprea/protobufp/blob/3ba4d583d1002761ad316072bc0259b147584dcd/protobufp/read_buffer.py#L31-L68
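The framing read_message expects is a big-endian 32-bit length prefix followed by exactly that many payload bytes; a minimal sketch of producing and consuming one such frame with struct:

from struct import pack, unpack

payload = b'hello'
frame = pack('>I', len(payload)) + payload   # 4-byte big-endian length, then the body
(length,) = unpack('>I', frame[:4])          # the same unpack('>I', ...) call the reader uses
assert frame[4:4 + length] == payload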
244,741
dsoprea/protobufp
protobufp/read_buffer.py
ReadBuffer.__cleanup
def __cleanup(self): """Clip buffers at the top of our list that have been completely exhausted. """ # TODO: Test this. with self.__class__.__locker: while self.__read_buffer_index > 0: del self.__buffers[0] self.__read_buffer_index -= 1
python
def __cleanup(self): """Clip buffers at the top of our list that have been completely exhausted. """ # TODO: Test this. with self.__class__.__locker: while self.__read_buffer_index > 0: del self.__buffers[0] self.__read_buffer_index -= 1
[ "def", "__cleanup", "(", "self", ")", ":", "# TODO: Test this.", "with", "self", ".", "__class__", ".", "__locker", ":", "while", "self", ".", "__read_buffer_index", ">", "0", ":", "del", "self", ".", "__buffers", "[", "0", "]", "self", ".", "__read_buffer_index", "-=", "1" ]
Clip buffers at the top of our list that have been completely exhausted.
[ "Clip", "buffers", "that", "the", "top", "of", "our", "list", "that", "have", "been", "completely", "exhausted", "." ]
3ba4d583d1002761ad316072bc0259b147584dcd
https://github.com/dsoprea/protobufp/blob/3ba4d583d1002761ad316072bc0259b147584dcd/protobufp/read_buffer.py#L123-L132
244,742
jic-dtool/dtool-info
dtool_info/overlay.py
ls
def ls(dataset_uri): """ List the overlays in the dataset. """ dataset = dtoolcore.DataSet.from_uri(dataset_uri) for overlay_name in dataset.list_overlay_names(): click.secho(overlay_name)
python
def ls(dataset_uri): """ List the overlays in the dataset. """ dataset = dtoolcore.DataSet.from_uri(dataset_uri) for overlay_name in dataset.list_overlay_names(): click.secho(overlay_name)
[ "def", "ls", "(", "dataset_uri", ")", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "for", "overlay_name", "in", "dataset", ".", "list_overlay_names", "(", ")", ":", "click", ".", "secho", "(", "overlay_name", ")" ]
List the overlays in the dataset.
[ "List", "the", "overlays", "in", "the", "dataset", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/overlay.py#L28-L34
244,743
jic-dtool/dtool-info
dtool_info/overlay.py
show
def show(dataset_uri, overlay_name): """ Show the content of a specific overlay. """ dataset = dtoolcore.DataSet.from_uri(dataset_uri) try: overlay = dataset.get_overlay(overlay_name) except: # NOQA click.secho( "No such overlay: {}".format(overlay_name), fg="red", err=True ) sys.exit(11) formatted_json = json.dumps(overlay, indent=2) colorful_json = pygments.highlight( formatted_json, pygments.lexers.JsonLexer(), pygments.formatters.TerminalFormatter()) click.secho(colorful_json, nl=False)
python
def show(dataset_uri, overlay_name): """ Show the content of a specific overlay. """ dataset = dtoolcore.DataSet.from_uri(dataset_uri) try: overlay = dataset.get_overlay(overlay_name) except: # NOQA click.secho( "No such overlay: {}".format(overlay_name), fg="red", err=True ) sys.exit(11) formatted_json = json.dumps(overlay, indent=2) colorful_json = pygments.highlight( formatted_json, pygments.lexers.JsonLexer(), pygments.formatters.TerminalFormatter()) click.secho(colorful_json, nl=False)
[ "def", "show", "(", "dataset_uri", ",", "overlay_name", ")", ":", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "dataset_uri", ")", "try", ":", "overlay", "=", "dataset", ".", "get_overlay", "(", "overlay_name", ")", "except", ":", "# NOQA", "click", ".", "secho", "(", "\"No such overlay: {}\"", ".", "format", "(", "overlay_name", ")", ",", "fg", "=", "\"red\"", ",", "err", "=", "True", ")", "sys", ".", "exit", "(", "11", ")", "formatted_json", "=", "json", ".", "dumps", "(", "overlay", ",", "indent", "=", "2", ")", "colorful_json", "=", "pygments", ".", "highlight", "(", "formatted_json", ",", "pygments", ".", "lexers", ".", "JsonLexer", "(", ")", ",", "pygments", ".", "formatters", ".", "TerminalFormatter", "(", ")", ")", "click", ".", "secho", "(", "colorful_json", ",", "nl", "=", "False", ")" ]
Show the content of a specific overlay.
[ "Show", "the", "content", "of", "a", "specific", "overlay", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/overlay.py#L40-L60
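The colourised output in show comes straight from pygments; a standalone sketch of the same highlighting pipeline applied to an arbitrary dict (the sample data is illustrative):

import json
import pygments
import pygments.formatters
import pygments.lexers

formatted_json = json.dumps({"overlay": {"item1": True}}, indent=2)
colorful_json = pygments.highlight(
    formatted_json,
    pygments.lexers.JsonLexer(),
    pygments.formatters.TerminalFormatter())  # emits ANSI colour codes for a terminal
print(colorful_json, end='')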
244,744
baliame/http-hmac-python
httphmac/request.py
canonicalize_header
def canonicalize_header(key): """Returns the canonicalized header name for the header name provided as an argument. The canonicalized header name according to the HTTP RFC is Kebab-Camel-Case. Keyword arguments: key -- the name of the header """ bits = key.split('-') for idx, b in enumerate(bits): bits[idx] = b.capitalize() return '-'.join(bits)
python
def canonicalize_header(key): """Returns the canonicalized header name for the header name provided as an argument. The canonicalized header name according to the HTTP RFC is Kebab-Camel-Case. Keyword arguments: key -- the name of the header """ bits = key.split('-') for idx, b in enumerate(bits): bits[idx] = b.capitalize() return '-'.join(bits)
[ "def", "canonicalize_header", "(", "key", ")", ":", "bits", "=", "key", ".", "split", "(", "'-'", ")", "for", "idx", ",", "b", "in", "enumerate", "(", "bits", ")", ":", "bits", "[", "idx", "]", "=", "b", ".", "capitalize", "(", ")", "return", "'-'", ".", "join", "(", "bits", ")" ]
Returns the canonicalized header name for the header name provided as an argument. The canonicalized header name according to the HTTP RFC is Kebab-Camel-Case. Keyword arguments: key -- the name of the header
[ "Returns", "the", "canonicalized", "header", "name", "for", "the", "header", "name", "provided", "as", "an", "argument", ".", "The", "canonicalized", "header", "name", "according", "to", "the", "HTTP", "RFC", "is", "Kebab", "-", "Camel", "-", "Case", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L99-L109
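Since str.capitalize() lowercases everything after the first character, canonicalize_header also normalises upper- and mixed-case names, for example:

assert canonicalize_header('content-type') == 'Content-Type'
# the digest header set by Request.with_body below canonicalizes the same way:
assert canonicalize_header('X-AUTHORIZATION-CONTENT-SHA256') == 'X-Authorization-Content-Sha256'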
244,745
baliame/http-hmac-python
httphmac/request.py
URL.validate
def validate(self): """Validates the URL object. The URL object is invalid if it does not represent an absolute URL. Returns True or False based on this. """ if (self.scheme is None or self.scheme == '') \ or (self.host is None or self.host == ''): return False return True
python
def validate(self): """Validates the URL object. The URL object is invalid if it does not represent an absolute URL. Returns True or False based on this. """ if (self.scheme is None or self.scheme == '') \ or (self.host is None or self.host == ''): return False return True
[ "def", "validate", "(", "self", ")", ":", "if", "(", "self", ".", "scheme", "is", "None", "or", "self", ".", "scheme", "!=", "''", ")", "and", "(", "self", ".", "host", "is", "None", "or", "self", ".", "host", "==", "''", ")", ":", "return", "False", "return", "True" ]
Validates the URL object. The URL object is invalid if it does not represent an absolute URL. Returns True or False based on this.
[ "Validates", "the", "URL", "object", ".", "The", "URL", "object", "is", "invalid", "if", "it", "does", "not", "represent", "an", "absolute", "URL", ".", "Returns", "True", "or", "False", "based", "on", "this", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L37-L44
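With the corrected check, validate() rejects anything that is not an absolute URL (missing scheme or missing host); a sketch assuming the URL constructor parses a plain string, as Request.with_url uses it:

assert URL('https://example.com/path').validate()   # scheme and host both present
assert not URL('/path/only').validate()             # relative URL: no scheme, no host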
244,746
baliame/http-hmac-python
httphmac/request.py
URL.request_uri
def request_uri(self): """Returns the request URL element of the URL. This request URL is the path, the query and the fragment appended as a relative URL to the host. The request URL always contains at least a leading slash. """ result = '/{0}'.format(self.path.lstrip('/')) if self.query is not None and self.query != '' and self.query != {}: result += '?{0}'.format(self.encoded_query()) if self.fragment is not None and self.fragment != '': result += '#{0}'.format(self.fragment) return result
python
def request_uri(self): """Returns the request URL element of the URL. This request URL is the path, the query and the fragment appended as a relative URL to the host. The request URL always contains at least a leading slash. """ result = '/{0}'.format(self.path.lstrip('/')) if self.query is not None and self.query != '' and self.query != {}: result += '?{0}'.format(self.encoded_query()) if self.fragment is not None and self.fragment != '': result += '#{0}'.format(self.fragment) return result
[ "def", "request_uri", "(", "self", ")", ":", "result", "=", "'/{0}'", ".", "format", "(", "self", ".", "path", ".", "lstrip", "(", "'/'", ")", ")", "if", "self", ".", "query", "is", "not", "None", "and", "self", ".", "query", "!=", "''", "and", "self", ".", "query", "!=", "{", "}", ":", "result", "+=", "'?{0}'", ".", "format", "(", "self", ".", "encoded_query", "(", ")", ")", "if", "self", ".", "fragment", "is", "not", "None", "and", "self", ".", "fragment", "!=", "''", ":", "result", "+=", "'#{0}'", ".", "format", "(", "self", ".", "fragment", ")", "return", "result" ]
Returns the request URL element of the URL. This request URL is the path, the query and the fragment appended as a relative URL to the host. The request URL always contains at least a leading slash.
[ "Returns", "the", "request", "URL", "element", "of", "the", "URL", ".", "This", "request", "URL", "is", "the", "path", "the", "query", "and", "the", "fragment", "appended", "as", "a", "relative", "URL", "to", "the", "host", ".", "The", "request", "URL", "always", "contains", "at", "least", "a", "leading", "slash", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L68-L78
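A sketch of the composition request_uri() performs, again assuming URL() parses a plain string; the expected result keeps path, query and fragment but drops scheme and host:

u = URL('https://example.com/a/b?x=1&y=2#frag')
print(u.request_uri())   # expected: /a/b?x=1&y=2#frag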
244,747
baliame/http-hmac-python
httphmac/request.py
URL.encoded_query
def encoded_query(self): """Returns the encoded query string of the URL. This may be different from the rawquery element, as that contains the query parsed by urllib but unmodified. The return value takes the form of key=value&key=value, and it never contains a leading question mark. """ if self.query is not None and self.query != '' and self.query != {}: try: return urlencode(self.query, doseq=True, quote_via=urlquote) except TypeError: return '&'.join(["{0}={1}".format(urlquote(k), urlquote(self.query[k][0])) for k in self.query]) else: return ''
python
def encoded_query(self): """Returns the encoded query string of the URL. This may be different from the rawquery element, as that contains the query parsed by urllib but unmodified. The return value takes the form of key=value&key=value, and it never contains a leading question mark. """ if self.query is not None and self.query != '' and self.query != {}: try: return urlencode(self.query, doseq=True, quote_via=urlquote) except TypeError: return '&'.join(["{0}={1}".format(urlquote(k), urlquote(self.query[k][0])) for k in self.query]) else: return ''
[ "def", "encoded_query", "(", "self", ")", ":", "if", "self", ".", "query", "is", "not", "None", "and", "self", ".", "query", "!=", "''", "and", "self", ".", "query", "!=", "{", "}", ":", "try", ":", "return", "urlencode", "(", "self", ".", "query", ",", "doseq", "=", "True", ",", "quote_via", "=", "urlquote", ")", "except", "TypeError", ":", "return", "'&'", ".", "join", "(", "[", "\"{0}={1}\"", ".", "format", "(", "urlquote", "(", "k", ")", ",", "urlquote", "(", "self", ".", "query", "[", "k", "]", "[", "0", "]", ")", ")", "for", "k", "in", "self", ".", "query", "]", ")", "else", ":", "return", "''" ]
Returns the encoded query string of the URL. This may be different from the rawquery element, as that contains the query parsed by urllib but unmodified. The return value takes the form of key=value&key=value, and it never contains a leading question mark.
[ "Returns", "the", "encoded", "query", "string", "of", "the", "URL", ".", "This", "may", "be", "different", "from", "the", "rawquery", "element", "as", "that", "contains", "the", "query", "parsed", "by", "urllib", "but", "unmodified", ".", "The", "return", "value", "takes", "the", "form", "of", "key", "=", "value&key", "=", "value", "and", "it", "never", "contains", "a", "leading", "question", "mark", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L85-L96
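The doseq=True branch expands list values into repeated keys, and quote_via=urlquote percent-encodes the values; the except TypeError fallback covers Python versions whose urlencode lacks the quote_via keyword. A rough standalone illustration of the primary branch (urlquote here stands for urllib's quote, which the module presumably imports under that name):

from urllib.parse import quote as urlquote
from urllib.parse import urlencode

query = {'tag': ['a', 'b'], 'q': 'hello world'}
print(urlencode(query, doseq=True, quote_via=urlquote))
# e.g. tag=a&tag=b&q=hello%20world (key order follows the dict)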
244,748
baliame/http-hmac-python
httphmac/request.py
Request.with_url
def with_url(self, url): """Sets the request's URL and returns the request itself. Automatically sets the Host header according to the URL. Keyword arguments: url -- a string representing the URL to set for the request """ self.url = URL(url) self.header["Host"] = self.url.host return self
python
def with_url(self, url): """Sets the request's URL and returns the request itself. Automatically sets the Host header according to the URL. Keyword arguments: url -- a string representing the URL to set for the request """ self.url = URL(url) self.header["Host"] = self.url.host return self
[ "def", "with_url", "(", "self", ",", "url", ")", ":", "self", ".", "url", "=", "URL", "(", "url", ")", "self", ".", "header", "[", "\"Host\"", "]", "=", "self", ".", "url", ".", "host", "return", "self" ]
Sets the request's URL and returns the request itself. Automatically sets the Host header according to the URL. Keyword arguments: url -- a string representing the URL to set for the request
[ "Sets", "the", "request", "s", "URL", "and", "returns", "the", "request", "itself", ".", "Automatically", "sets", "the", "Host", "header", "according", "to", "the", "URL", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L130-L139
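Because every setter returns self, requests can be assembled fluently; a sketch assuming Request() can be constructed without arguments:

req = (Request()
       .with_url('https://example.com/api')            # also sets the Host header
       .with_headers({'Accept': 'application/json'}))  # single headers go through with_header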
244,749
baliame/http-hmac-python
httphmac/request.py
Request.with_headers
def with_headers(self, headers): """Sets multiple headers on the request and returns the request itself. Keyword arguments: headers -- a dict-like object which contains the headers to set. """ for key, value in headers.items(): self.with_header(key, value) return self
python
def with_headers(self, headers): """Sets multiple headers on the request and returns the request itself. Keyword arguments: headers -- a dict-like object which contains the headers to set. """ for key, value in headers.items(): self.with_header(key, value) return self
[ "def", "with_headers", "(", "self", ",", "headers", ")", ":", "for", "key", ",", "value", "in", "headers", ".", "items", "(", ")", ":", "self", ".", "with_header", "(", "key", ",", "value", ")", "return", "self" ]
Sets multiple headers on the request and returns the request itself. Keyword arguments: headers -- a dict-like object which contains the headers to set.
[ "Sets", "multiple", "headers", "on", "the", "request", "and", "returns", "the", "request", "itself", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L152-L160
244,750
baliame/http-hmac-python
httphmac/request.py
Request.with_body
def with_body(self, body): # @todo take encoding into account """Sets the request body to the provided value and returns the request itself. Keyword arguments: body -- A UTF-8 string or bytes-like object which represents the request body. """ try: self.body = body.encode('utf-8') except AttributeError: try: self.body = bytes(body) except (TypeError, ValueError): raise ValueError("Request body must be a string or bytes-like object.") hasher = hashlib.sha256() hasher.update(self.body) digest = base64.b64encode(hasher.digest()).decode('utf-8') self.with_header("X-Authorization-Content-Sha256", digest) return self
python
def with_body(self, body): # @todo take encoding into account """Sets the request body to the provided value and returns the request itself. Keyword arguments: body -- A UTF-8 string or bytes-like object which represents the request body. """ try: self.body = body.encode('utf-8') except AttributeError: try: self.body = bytes(body) except (TypeError, ValueError): raise ValueError("Request body must be a string or bytes-like object.") hasher = hashlib.sha256() hasher.update(self.body) digest = base64.b64encode(hasher.digest()).decode('utf-8') self.with_header("X-Authorization-Content-Sha256", digest) return self
[ "def", "with_body", "(", "self", ",", "body", ")", ":", "# @todo take encoding into account", "try", ":", "self", ".", "body", "=", "body", ".", "encode", "(", "'utf-8'", ")", "except", ":", "try", ":", "self", ".", "body", "=", "bytes", "(", "body", ")", "except", ":", "raise", "ValueError", "(", "\"Request body must be a string or bytes-like object.\"", ")", "hasher", "=", "hashlib", ".", "sha256", "(", ")", "hasher", ".", "update", "(", "self", ".", "body", ")", "digest", "=", "base64", ".", "b64encode", "(", "hasher", ".", "digest", "(", ")", ")", ".", "decode", "(", "'utf-8'", ")", "self", ".", "with_header", "(", "\"X-Authorization-Content-Sha256\"", ",", "digest", ")", "return", "self" ]
Sets the request body to the provided value and returns the request itself. Keyword arguments: body -- A UTF-8 string or bytes-like object which represents the request body.
[ "Sets", "the", "request", "body", "to", "the", "provided", "value", "and", "returns", "the", "request", "itself", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L170-L188
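The X-Authorization-Content-Sha256 value is simply the base64-encoded SHA-256 of the encoded body; a standalone sketch reproducing what with_body stores:

import base64
import hashlib

body = '{"k": "v"}'.encode('utf-8')
digest = base64.b64encode(hashlib.sha256(body).digest()).decode('utf-8')
# digest now equals the header value with_body() would set for this body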
244,751
baliame/http-hmac-python
httphmac/request.py
Request.get_header
def get_header(self, key): """Returns the requested header, or an empty string if the header is not set. Keyword arguments: key -- The header name. It will be canonicalized before use. """ key = canonicalize_header(key) if key in self.header: return self.header[key] return ''
python
def get_header(self, key): """Returns the requested header, or an empty string if the header is not set. Keyword arguments: key -- The header name. It will be canonicalized before use. """ key = canonicalize_header(key) if key in self.header: return self.header[key] return ''
[ "def", "get_header", "(", "self", ",", "key", ")", ":", "key", "=", "canonicalize_header", "(", "key", ")", "if", "key", "in", "self", ".", "header", ":", "return", "self", ".", "header", "[", "key", "]", "return", "''" ]
Returns the requested header, or an empty string if the header is not set. Keyword arguments: key -- The header name. It will be canonicalized before use.
[ "Returns", "the", "requested", "header", "or", "an", "empty", "string", "if", "the", "header", "is", "not", "set", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L208-L217
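Because get_header canonicalizes its argument before the lookup, any casing of a name reaches a header stored under its canonical form, such as the Host header set by with_url; a sketch, again assuming Request() takes no constructor arguments:

req = Request().with_url('https://example.com/api')
assert req.get_header('HOST') == 'example.com'   # 'HOST' canonicalizes to 'Host'
assert req.get_header('x-missing') == ''         # absent headers come back as ''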
244,752
baliame/http-hmac-python
httphmac/request.py
Request.do
def do(self): """Executes the request represented by this object. The requests library will be used for this purpose. Returns an instance of requests.Response. """ data = None if self.body is not None and self.body != b'': data = self.body return requests.request(self.method, str(self.url), data=data, headers=self.header)
python
def do(self): """Executes the request represented by this object. The requests library will be used for this purpose. Returns an instance of requests.Response. """ data = None if self.body is not None and self.body != b'': data = self.body return requests.request(self.method, str(self.url), data=data, headers=self.header)
[ "def", "do", "(", "self", ")", ":", "data", "=", "None", "if", "self", ".", "body", "is", "not", "None", "and", "self", ".", "body", "!=", "b''", ":", "data", "=", "self", ".", "body", "return", "requests", ".", "request", "(", "self", ".", "method", ",", "str", "(", "self", ".", "url", ")", ",", "data", "=", "data", ",", "headers", "=", "self", ".", "header", ")" ]
Executes the request represented by this object. The requests library will be used for this purpose. Returns an instance of requests.Response.
[ "Executes", "the", "request", "represented", "by", "this", "object", ".", "The", "requests", "library", "will", "be", "used", "for", "this", "purpose", ".", "Returns", "an", "instance", "of", "requests", ".", "Response", "." ]
9884c0cbfdb712f9f37080a8efbfdce82850785f
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L219-L226
244,753
radjkarl/fancyTools
fancytools/utils/StreamSignal.py
StreamSignal.setWriteToShell
def setWriteToShell(self, writeToShell=True): """connect sys.stdout to the Qt signal""" if writeToShell and not self._connected: self.message.connect(self.stdW) self._connected = True elif not writeToShell and self._connected: try: self.message.disconnect(self.stdW) except TypeError: pass # was not connected self._connected = False
python
def setWriteToShell(self, writeToShell=True): """connect sys.stdout to the Qt signal""" if writeToShell and not self._connected: self.message.connect(self.stdW) self._connected = True elif not writeToShell and self._connected: try: self.message.disconnect(self.stdW) except TypeError: pass # was not connected self._connected = False
[ "def", "setWriteToShell", "(", "self", ",", "writeToShell", "=", "True", ")", ":", "if", "writeToShell", "and", "not", "self", ".", "_connected", ":", "self", ".", "message", ".", "connect", "(", "self", ".", "stdW", ")", "self", ".", "_connected", "=", "True", "elif", "not", "writeToShell", "and", "self", ".", "_connected", ":", "try", ":", "self", ".", "message", ".", "disconnect", "(", "self", ".", "stdW", ")", "except", "TypeError", ":", "pass", "# was not connected\r", "self", ".", "_connected", "=", "False" ]
connect sys.stdout to the Qt signal
[ "connect", "sysout", "to", "the", "qtSignal" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/utils/StreamSignal.py#L62-L72
244,754
synw/django-introspection
introspection/inspector/__init__.py
Inspector._get_app
def _get_app(self, appname): """ returns app object or None """ try: app = APPS.get_app_config(appname) except Exception as e: self.err(e) return return app
python
def _get_app(self, appname): """ returns app object or None """ try: app = APPS.get_app_config(appname) except Exception as e: self.err(e) return return app
[ "def", "_get_app", "(", "self", ",", "appname", ")", ":", "try", ":", "app", "=", "APPS", ".", "get_app_config", "(", "appname", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ")", "return", "return", "app" ]
returns app object or None
[ "returns", "app", "object", "or", "None" ]
58a9c44af36965e0f203456891172d560140f83c
https://github.com/synw/django-introspection/blob/58a9c44af36965e0f203456891172d560140f83c/introspection/inspector/__init__.py#L254-L263
244,755
synw/django-introspection
introspection/inspector/__init__.py
Inspector._get_model
def _get_model(self, appname, modelname): """ return model or None """ app = self._get_app(appname) models = app.get_models() for mod in models: if mod.__name__ == modelname: return mod msg = "Model " + modelname + " not found" self.err(msg) return None
python
def _get_model(self, appname, modelname): """ return model or None """ app = self._get_app(appname) models = app.get_models() for mod in models: if mod.__name__ == modelname: return mod msg = "Model " + modelname + " not found" self.err(msg) return None
[ "def", "_get_model", "(", "self", ",", "appname", ",", "modelname", ")", ":", "app", "=", "self", ".", "_get_app", "(", "appname", ")", "models", "=", "app", ".", "get_models", "(", ")", "model", "=", "None", "for", "mod", "in", "models", ":", "if", "mod", ".", "__name__", "==", "modelname", ":", "model", "=", "mod", "return", "model", "msg", "=", "\"Model \"", "+", "modelname", "+", "\" not found\"" ]
return model or None
[ "return", "model", "or", "None" ]
58a9c44af36965e0f203456891172d560140f83c
https://github.com/synw/django-introspection/blob/58a9c44af36965e0f203456891172d560140f83c/introspection/inspector/__init__.py#L265-L276
244,756
synw/django-introspection
introspection/inspector/__init__.py
Inspector._count_model
def _count_model(self, model): """ return model count """ try: res = model.objects.all().count() except Exception as e: self.err(e) return return res
python
def _count_model(self, model): """ return model count """ try: res = model.objects.all().count() except Exception as e: self.err(e) return return res
[ "def", "_count_model", "(", "self", ",", "model", ")", ":", "try", ":", "res", "=", "model", ".", "objects", ".", "all", "(", ")", ".", "count", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ")", "return", "return", "res" ]
return model count
[ "return", "model", "count" ]
58a9c44af36965e0f203456891172d560140f83c
https://github.com/synw/django-introspection/blob/58a9c44af36965e0f203456891172d560140f83c/introspection/inspector/__init__.py#L292-L301
244,757
matthewdeanmartin/jiggle_version
jiggle_version/central_module_finder.py
CentralModuleFinder.find_setup_file_name
def find_setup_file_name(self): # type: () ->None """ Usually setup.py or setup """ for file_path in [ x for x in os.listdir(".") if os.path.isfile(x) and x in ["setup.py", "setup"] ]: if self.file_opener.is_python_inside(file_path): self.setup_file_name = file_path break
python
def find_setup_file_name(self): # type: () ->None """ Usually setup.py or setup """ for file_path in [ x for x in os.listdir(".") if os.path.isfile(x) and x in ["setup.py", "setup"] ]: if self.file_opener.is_python_inside(file_path): self.setup_file_name = file_path break
[ "def", "find_setup_file_name", "(", "self", ")", ":", "# type: () ->None", "for", "file_path", "in", "[", "x", "for", "x", "in", "os", ".", "listdir", "(", "\".\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "x", ")", "and", "x", "in", "[", "\"setup.py\"", ",", "\"setup\"", "]", "]", ":", "if", "self", ".", "file_opener", ".", "is_python_inside", "(", "file_path", ")", ":", "self", ".", "setup_file_name", "=", "file_path", "break" ]
Usually setup.py or setup
[ "Usually", "setup", ".", "py", "or", "setup" ]
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/central_module_finder.py#L48-L59
244,758
jeroyang/txttk
txttk/nlptools.py
clause_tokenize
def clause_tokenize(sentence): """ Split on comma or parenthesis, if there are more than three words for each clause >>> context = 'While I was walking home, this bird fell down in front of me.' >>> clause_tokenize(context) ['While I was walking home,', ' this bird fell down in front of me.'] """ clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))') clause_stem = clause_re.sub(r'\1###clausebreak###', sentence) return [c for c in clause_stem.split('###clausebreak###') if c != '']
python
def clause_tokenize(sentence): """ Split on comma or parenthesis, if there are more than three words for each clause >>> context = 'While I was walking home, this bird fell down in front of me.' >>> clause_tokenize(context) ['While I was walking home,', ' this bird fell down in front of me.'] """ clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))') clause_stem = clause_re.sub(r'\1###clausebreak###', sentence) return [c for c in clause_stem.split('###clausebreak###') if c != '']
[ "def", "clause_tokenize", "(", "sentence", ")", ":", "clause_re", "=", "re", ".", "compile", "(", "r'((?:\\S+\\s){2,}\\S+,|(?:\\S+\\s){3,}(?=\\((?:\\S+\\s){2,}\\S+\\)))'", ")", "clause_stem", "=", "clause_re", ".", "sub", "(", "r'\\1###clausebreak###'", ",", "sentence", ")", "return", "[", "c", "for", "c", "in", "clause_stem", ".", "split", "(", "'###clausebreak###'", ")", "if", "c", "!=", "''", "]" ]
Split on comma or parenthesis, if there are more than three words for each clause >>> context = 'While I was walking home, this bird fell down in front of me.' >>> clause_tokenize(context) ['While I was walking home,', ' this bird fell down in front of me.']
[ "Split", "on", "comma", "or", "parenthesis", "if", "there", "are", "more", "then", "three", "words", "for", "each", "clause" ]
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L57-L68
244,759
jeroyang/txttk
txttk/nlptools.py
word_tokenize
def word_tokenize(sentence): """ A generator which yields tokens based on the given sentence without deleting anything. >>> context = "I love you. Please don't leave." >>> list(word_tokenize(context)) ['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.'] """ date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?' number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)' arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]' word_pattern = r'[\w]+' non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~')) space_pattern = r'\s' anything_pattern = r'.' patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern] big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns]) for match in re.finditer(big_pattern, sentence): yield match.group(0)
python
def word_tokenize(sentence): """ A generator which yields tokens based on the given sentence without deleting anything. >>> context = "I love you. Please don't leave." >>> list(word_tokenize(context)) ['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.'] """ date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?' number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)' arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]' word_pattern = r'[\w]+' non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~')) space_pattern = r'\s' anything_pattern = r'.' patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern] big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns]) for match in re.finditer(big_pattern, sentence): yield match.group(0)
[ "def", "word_tokenize", "(", "sentence", ")", ":", "date_pattern", "=", "r'\\d\\d(\\d\\d)?[\\\\-]\\d\\d[\\\\-]\\d\\d(\\d\\d)?'", "number_pattern", "=", "r'[\\+-]?(\\d+\\.\\d+|\\d{1,3},(\\d{3},)*\\d{3}|\\d+)'", "arr_pattern", "=", "r'(?: \\w\\.){2,3}|(?:\\A|\\s)(?:\\w\\.){2,3}|[A-Z]\\. [a-z]'", "word_pattern", "=", "r'[\\w]+'", "non_space_pattern", "=", "r'[{}]|\\w'", ".", "format", "(", "re", ".", "escape", "(", "'!\"#$%&()*,./:;<=>?@[\\]^_-`{|}~'", ")", ")", "space_pattern", "=", "r'\\s'", "anything_pattern", "=", "r'.'", "patterns", "=", "[", "date_pattern", ",", "number_pattern", ",", "arr_pattern", ",", "word_pattern", ",", "non_space_pattern", ",", "space_pattern", ",", "anything_pattern", "]", "big_pattern", "=", "r'|'", ".", "join", "(", "[", "(", "'('", "+", "pattern", "+", "')'", ")", "for", "pattern", "in", "patterns", "]", ")", "for", "match", "in", "re", ".", "finditer", "(", "big_pattern", ",", "sentence", ")", ":", "yield", "match", ".", "group", "(", "0", ")" ]
A generator which yields tokens based on the given sentence without deleting anything. >>> context = "I love you. Please don't leave." >>> list(word_tokenize(context)) ['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
[ "A", "generator", "which", "yields", "tokens", "based", "on", "the", "given", "sentence", "without", "deleting", "anything", "." ]
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L70-L89
244,760
jeroyang/txttk
txttk/nlptools.py
slim_stem
def slim_stem(token): """ A very simple stemmer, for stemming GO entities. >>> token = 'interaction' >>> slim_stem(token) 'interact' """ target_suffixes = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment'] for suffix in sorted(target_suffixes, key=len, reverse=True): if token.endswith(suffix): token = token[0:-len(suffix)] break if token.endswith('ll'): token = token[:-1] return token
python
def slim_stem(token): """ A very simple stemmer, for stemming GO entities. >>> token = 'interaction' >>> slim_stem(token) 'interact' """ target_suffixes = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment'] for suffix in sorted(target_suffixes, key=len, reverse=True): if token.endswith(suffix): token = token[0:-len(suffix)] break if token.endswith('ll'): token = token[:-1] return token
[ "def", "slim_stem", "(", "token", ")", ":", "target_sulfixs", "=", "[", "'ic'", ",", "'tic'", ",", "'e'", ",", "'ive'", ",", "'ing'", ",", "'ical'", ",", "'nal'", ",", "'al'", ",", "'ism'", ",", "'ion'", ",", "'ation'", ",", "'ar'", ",", "'sis'", ",", "'us'", ",", "'ment'", "]", "for", "sulfix", "in", "sorted", "(", "target_sulfixs", ",", "key", "=", "len", ",", "reverse", "=", "True", ")", ":", "if", "token", ".", "endswith", "(", "sulfix", ")", ":", "token", "=", "token", "[", "0", ":", "-", "len", "(", "sulfix", ")", "]", "break", "if", "token", ".", "endswith", "(", "'ll'", ")", ":", "token", "=", "token", "[", ":", "-", "1", "]", "return", "token" ]
A very simple stemmer, for stemming GO entities. >>> token = 'interaction' >>> slim_stem(token) 'interact'
[ "A", "very", "simple", "stemmer", "for", "entity", "of", "GO", "stemming", "." ]
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L91-L107
244,761
jeroyang/txttk
txttk/nlptools.py
ngram
def ngram(n, iter_tokens): """ Return a generator of n-gram from an iterable """ z = len(iter_tokens) return (iter_tokens[i:i+n] for i in range(z-n+1))
python
def ngram(n, iter_tokens): """ Return a generator of n-gram from an iterable """ z = len(iter_tokens) return (iter_tokens[i:i+n] for i in range(z-n+1))
[ "def", "ngram", "(", "n", ",", "iter_tokens", ")", ":", "z", "=", "len", "(", "iter_tokens", ")", "return", "(", "iter_tokens", "[", "i", ":", "i", "+", "n", "]", "for", "i", "in", "range", "(", "z", "-", "n", "+", "1", ")", ")" ]
Return a generator of n-gram from an iterable
[ "Return", "a", "generator", "of", "n", "-", "gram", "from", "an", "iterable" ]
8e6daf9cbb7dfbc4900870fb365add17929bd4ab
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L116-L121
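ngram slices any indexable sequence, so token lists and raw strings both work; note it returns a generator, and an n longer than the sequence yields nothing:

assert list(ngram(2, ['I', 'love', 'you'])) == [['I', 'love'], ['love', 'you']]
assert list(ngram(3, 'abcd')) == ['abc', 'bcd']
assert list(ngram(5, 'abc')) == []   # z - n + 1 <= 0 gives an empty range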
244,762
rdo-management/python-rdomanager-oscplugin
rdomanager_oscplugin/v1/overcloud_deploy.py
DeployOvercloud.set_overcloud_passwords
def set_overcloud_passwords(self, parameters, parsed_args): """Add passwords to the parameters dictionary :param parameters: A dictionary for the passwords to be added to :type parameters: dict """ undercloud_ceilometer_snmpd_password = utils.get_config_value( "auth", "undercloud_ceilometer_snmpd_password") self.passwords = passwords = utils.generate_overcloud_passwords() ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD'] ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET'] if parsed_args.templates: parameters['AdminPassword'] = passwords['OVERCLOUD_ADMIN_PASSWORD'] parameters['AdminToken'] = passwords['OVERCLOUD_ADMIN_TOKEN'] parameters['CeilometerPassword'] = ceilometer_pass parameters['CeilometerMeteringSecret'] = ceilometer_secret parameters['CinderPassword'] = passwords[ 'OVERCLOUD_CINDER_PASSWORD'] parameters['GlancePassword'] = passwords[ 'OVERCLOUD_GLANCE_PASSWORD'] parameters['HeatPassword'] = passwords['OVERCLOUD_HEAT_PASSWORD'] parameters['HeatStackDomainAdminPassword'] = passwords[ 'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'] parameters['NeutronPassword'] = passwords[ 'OVERCLOUD_NEUTRON_PASSWORD'] parameters['NovaPassword'] = passwords['OVERCLOUD_NOVA_PASSWORD'] parameters['SwiftHashSuffix'] = passwords['OVERCLOUD_SWIFT_HASH'] parameters['SwiftPassword'] = passwords['OVERCLOUD_SWIFT_PASSWORD'] parameters['SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) else: parameters['Controller-1::AdminPassword'] = passwords[ 'OVERCLOUD_ADMIN_PASSWORD'] parameters['Controller-1::AdminToken'] = passwords[ 'OVERCLOUD_ADMIN_TOKEN'] parameters['Compute-1::AdminPassword'] = passwords[ 'OVERCLOUD_ADMIN_PASSWORD'] parameters['Controller-1::SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) parameters['Cinder-Storage-1::SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) parameters['Swift-Storage-1::SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) parameters['Compute-1::SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) parameters['Controller-1::CeilometerPassword'] = ceilometer_pass parameters[ 'Controller-1::CeilometerMeteringSecret'] = ceilometer_secret parameters['Compute-1::CeilometerPassword'] = ceilometer_pass parameters[ 'Compute-1::CeilometerMeteringSecret'] = ceilometer_secret parameters['Controller-1::CinderPassword'] = ( passwords['OVERCLOUD_CINDER_PASSWORD']) parameters['Controller-1::GlancePassword'] = ( passwords['OVERCLOUD_GLANCE_PASSWORD']) parameters['Controller-1::HeatPassword'] = ( passwords['OVERCLOUD_HEAT_PASSWORD']) parameters['Controller-1::HeatStackDomainAdminPassword'] = ( passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']) parameters['Controller-1::NeutronPassword'] = ( passwords['OVERCLOUD_NEUTRON_PASSWORD']) parameters['Compute-1::NeutronPassword'] = ( passwords['OVERCLOUD_NEUTRON_PASSWORD']) parameters['Controller-1::NovaPassword'] = ( passwords['OVERCLOUD_NOVA_PASSWORD']) parameters['Compute-1::NovaPassword'] = ( passwords['OVERCLOUD_NOVA_PASSWORD']) parameters['Controller-1::SwiftHashSuffix'] = ( passwords['OVERCLOUD_SWIFT_HASH']) parameters['Controller-1::SwiftPassword'] = ( passwords['OVERCLOUD_SWIFT_PASSWORD'])
python
def set_overcloud_passwords(self, parameters, parsed_args): """Add passwords to the parameters dictionary :param parameters: A dictionary for the passwords to be added to :type parameters: dict """ undercloud_ceilometer_snmpd_password = utils.get_config_value( "auth", "undercloud_ceilometer_snmpd_password") self.passwords = passwords = utils.generate_overcloud_passwords() ceilometer_pass = passwords['OVERCLOUD_CEILOMETER_PASSWORD'] ceilometer_secret = passwords['OVERCLOUD_CEILOMETER_SECRET'] if parsed_args.templates: parameters['AdminPassword'] = passwords['OVERCLOUD_ADMIN_PASSWORD'] parameters['AdminToken'] = passwords['OVERCLOUD_ADMIN_TOKEN'] parameters['CeilometerPassword'] = ceilometer_pass parameters['CeilometerMeteringSecret'] = ceilometer_secret parameters['CinderPassword'] = passwords[ 'OVERCLOUD_CINDER_PASSWORD'] parameters['GlancePassword'] = passwords[ 'OVERCLOUD_GLANCE_PASSWORD'] parameters['HeatPassword'] = passwords['OVERCLOUD_HEAT_PASSWORD'] parameters['HeatStackDomainAdminPassword'] = passwords[ 'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'] parameters['NeutronPassword'] = passwords[ 'OVERCLOUD_NEUTRON_PASSWORD'] parameters['NovaPassword'] = passwords['OVERCLOUD_NOVA_PASSWORD'] parameters['SwiftHashSuffix'] = passwords['OVERCLOUD_SWIFT_HASH'] parameters['SwiftPassword'] = passwords['OVERCLOUD_SWIFT_PASSWORD'] parameters['SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) else: parameters['Controller-1::AdminPassword'] = passwords[ 'OVERCLOUD_ADMIN_PASSWORD'] parameters['Controller-1::AdminToken'] = passwords[ 'OVERCLOUD_ADMIN_TOKEN'] parameters['Compute-1::AdminPassword'] = passwords[ 'OVERCLOUD_ADMIN_PASSWORD'] parameters['Controller-1::SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) parameters['Cinder-Storage-1::SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) parameters['Swift-Storage-1::SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) parameters['Compute-1::SnmpdReadonlyUserPassword'] = ( undercloud_ceilometer_snmpd_password) parameters['Controller-1::CeilometerPassword'] = ceilometer_pass parameters[ 'Controller-1::CeilometerMeteringSecret'] = ceilometer_secret parameters['Compute-1::CeilometerPassword'] = ceilometer_pass parameters[ 'Compute-1::CeilometerMeteringSecret'] = ceilometer_secret parameters['Controller-1::CinderPassword'] = ( passwords['OVERCLOUD_CINDER_PASSWORD']) parameters['Controller-1::GlancePassword'] = ( passwords['OVERCLOUD_GLANCE_PASSWORD']) parameters['Controller-1::HeatPassword'] = ( passwords['OVERCLOUD_HEAT_PASSWORD']) parameters['Controller-1::HeatStackDomainAdminPassword'] = ( passwords['OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD']) parameters['Controller-1::NeutronPassword'] = ( passwords['OVERCLOUD_NEUTRON_PASSWORD']) parameters['Compute-1::NeutronPassword'] = ( passwords['OVERCLOUD_NEUTRON_PASSWORD']) parameters['Controller-1::NovaPassword'] = ( passwords['OVERCLOUD_NOVA_PASSWORD']) parameters['Compute-1::NovaPassword'] = ( passwords['OVERCLOUD_NOVA_PASSWORD']) parameters['Controller-1::SwiftHashSuffix'] = ( passwords['OVERCLOUD_SWIFT_HASH']) parameters['Controller-1::SwiftPassword'] = ( passwords['OVERCLOUD_SWIFT_PASSWORD'])
[ "def", "set_overcloud_passwords", "(", "self", ",", "parameters", ",", "parsed_args", ")", ":", "undercloud_ceilometer_snmpd_password", "=", "utils", ".", "get_config_value", "(", "\"auth\"", ",", "\"undercloud_ceilometer_snmpd_password\"", ")", "self", ".", "passwords", "=", "passwords", "=", "utils", ".", "generate_overcloud_passwords", "(", ")", "ceilometer_pass", "=", "passwords", "[", "'OVERCLOUD_CEILOMETER_PASSWORD'", "]", "ceilometer_secret", "=", "passwords", "[", "'OVERCLOUD_CEILOMETER_SECRET'", "]", "if", "parsed_args", ".", "templates", ":", "parameters", "[", "'AdminPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_ADMIN_PASSWORD'", "]", "parameters", "[", "'AdminToken'", "]", "=", "passwords", "[", "'OVERCLOUD_ADMIN_TOKEN'", "]", "parameters", "[", "'CeilometerPassword'", "]", "=", "ceilometer_pass", "parameters", "[", "'CeilometerMeteringSecret'", "]", "=", "ceilometer_secret", "parameters", "[", "'CinderPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_CINDER_PASSWORD'", "]", "parameters", "[", "'GlancePassword'", "]", "=", "passwords", "[", "'OVERCLOUD_GLANCE_PASSWORD'", "]", "parameters", "[", "'HeatPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_HEAT_PASSWORD'", "]", "parameters", "[", "'HeatStackDomainAdminPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'", "]", "parameters", "[", "'NeutronPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_NEUTRON_PASSWORD'", "]", "parameters", "[", "'NovaPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_NOVA_PASSWORD'", "]", "parameters", "[", "'SwiftHashSuffix'", "]", "=", "passwords", "[", "'OVERCLOUD_SWIFT_HASH'", "]", "parameters", "[", "'SwiftPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_SWIFT_PASSWORD'", "]", "parameters", "[", "'SnmpdReadonlyUserPassword'", "]", "=", "(", "undercloud_ceilometer_snmpd_password", ")", "else", ":", "parameters", "[", "'Controller-1::AdminPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_ADMIN_PASSWORD'", "]", "parameters", "[", "'Controller-1::AdminToken'", "]", "=", "passwords", "[", "'OVERCLOUD_ADMIN_TOKEN'", "]", "parameters", "[", "'Compute-1::AdminPassword'", "]", "=", "passwords", "[", "'OVERCLOUD_ADMIN_PASSWORD'", "]", "parameters", "[", "'Controller-1::SnmpdReadonlyUserPassword'", "]", "=", "(", "undercloud_ceilometer_snmpd_password", ")", "parameters", "[", "'Cinder-Storage-1::SnmpdReadonlyUserPassword'", "]", "=", "(", "undercloud_ceilometer_snmpd_password", ")", "parameters", "[", "'Swift-Storage-1::SnmpdReadonlyUserPassword'", "]", "=", "(", "undercloud_ceilometer_snmpd_password", ")", "parameters", "[", "'Compute-1::SnmpdReadonlyUserPassword'", "]", "=", "(", "undercloud_ceilometer_snmpd_password", ")", "parameters", "[", "'Controller-1::CeilometerPassword'", "]", "=", "ceilometer_pass", "parameters", "[", "'Controller-1::CeilometerMeteringSecret'", "]", "=", "ceilometer_secret", "parameters", "[", "'Compute-1::CeilometerPassword'", "]", "=", "ceilometer_pass", "parameters", "[", "'Compute-1::CeilometerMeteringSecret'", "]", "=", "ceilometer_secret", "parameters", "[", "'Controller-1::CinderPassword'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_CINDER_PASSWORD'", "]", ")", "parameters", "[", "'Controller-1::GlancePassword'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_GLANCE_PASSWORD'", "]", ")", "parameters", "[", "'Controller-1::HeatPassword'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_HEAT_PASSWORD'", "]", ")", "parameters", "[", "'Controller-1::HeatStackDomainAdminPassword'", "]", "=", "(", "passwords", "[", 
"'OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD'", "]", ")", "parameters", "[", "'Controller-1::NeutronPassword'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_NEUTRON_PASSWORD'", "]", ")", "parameters", "[", "'Compute-1::NeutronPassword'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_NEUTRON_PASSWORD'", "]", ")", "parameters", "[", "'Controller-1::NovaPassword'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_NOVA_PASSWORD'", "]", ")", "parameters", "[", "'Compute-1::NovaPassword'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_NOVA_PASSWORD'", "]", ")", "parameters", "[", "'Controller-1::SwiftHashSuffix'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_SWIFT_HASH'", "]", ")", "parameters", "[", "'Controller-1::SwiftPassword'", "]", "=", "(", "passwords", "[", "'OVERCLOUD_SWIFT_PASSWORD'", "]", ")" ]
Add passwords to the parameters dictionary :param parameters: A dictionary for the passwords to be added to :type parameters: dict
[ "Add", "passwords", "to", "the", "parameters", "dictionary" ]
165a166fb2e5a2598380779b35812b8b8478c4fb
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L104-L176
244,763
rdo-management/python-rdomanager-oscplugin
rdomanager_oscplugin/v1/overcloud_deploy.py
DeployOvercloud._get_stack
def _get_stack(self, orchestration_client, stack_name): """Get the currently deployed overcloud stack, if it exists.""" try: stack = orchestration_client.stacks.get(stack_name) self.log.info("Stack found, will be doing a stack update") return stack except HTTPNotFound: self.log.info("No stack found, will be doing a stack create")
python
def _get_stack(self, orchestration_client, stack_name): """Get the currently deployed overcloud stack, if it exists.""" try: stack = orchestration_client.stacks.get(stack_name) self.log.info("Stack found, will be doing a stack update") return stack except HTTPNotFound: self.log.info("No stack found, will be doing a stack create")
[ "def", "_get_stack", "(", "self", ",", "orchestration_client", ",", "stack_name", ")", ":", "try", ":", "stack", "=", "orchestration_client", ".", "stacks", ".", "get", "(", "stack_name", ")", "self", ".", "log", ".", "info", "(", "\"Stack found, will be doing a stack update\"", ")", "return", "stack", "except", "HTTPNotFound", ":", "self", ".", "log", ".", "info", "(", "\"No stack found, will be doing a stack create\"", ")" ]
Get the currently deployed overcloud stack, if it exists.
[ "Get", "the", "ID", "for", "the", "current", "deployed", "overcloud", "stack", "if", "it", "exists", "." ]
165a166fb2e5a2598380779b35812b8b8478c4fb
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L178-L186
244,764
rdo-management/python-rdomanager-oscplugin
rdomanager_oscplugin/v1/overcloud_deploy.py
DeployOvercloud._heat_deploy
def _heat_deploy(self, stack, stack_name, template_path, parameters, environments, timeout): """Create or update the Heat stack from the given templates, parameters and environments.""" self.log.debug("Processing environment files") env_files, env = ( template_utils.process_multiple_environments_and_files( environments)) self.log.debug("Getting template contents") template_files, template = template_utils.get_template_contents( template_path) files = dict(list(template_files.items()) + list(env_files.items())) clients = self.app.client_manager orchestration_client = clients.rdomanager_oscplugin.orchestration() self.log.debug("Deploying stack: %s", stack_name) self.log.debug("Deploying template: %s", template) self.log.debug("Deploying parameters: %s", parameters) self.log.debug("Deploying environment: %s", env) self.log.debug("Deploying files: %s", files) stack_args = { 'stack_name': stack_name, 'template': template, 'parameters': parameters, 'environment': env, 'files': files } if timeout: stack_args['timeout_mins'] = timeout if stack is None: self.log.info("Performing Heat stack create") orchestration_client.stacks.create(**stack_args) else: self.log.info("Performing Heat stack update") # Make sure existing parameters for stack are reused stack_args['existing'] = 'true' orchestration_client.stacks.update(stack.id, **stack_args) create_result = utils.wait_for_stack_ready( orchestration_client, stack_name) if not create_result: if stack is None: raise Exception("Heat Stack create failed.") else: raise Exception("Heat Stack update failed.")
python
def _heat_deploy(self, stack, stack_name, template_path, parameters, environments, timeout): """Create or update the Heat stack from the given templates, parameters and environments.""" self.log.debug("Processing environment files") env_files, env = ( template_utils.process_multiple_environments_and_files( environments)) self.log.debug("Getting template contents") template_files, template = template_utils.get_template_contents( template_path) files = dict(list(template_files.items()) + list(env_files.items())) clients = self.app.client_manager orchestration_client = clients.rdomanager_oscplugin.orchestration() self.log.debug("Deploying stack: %s", stack_name) self.log.debug("Deploying template: %s", template) self.log.debug("Deploying parameters: %s", parameters) self.log.debug("Deploying environment: %s", env) self.log.debug("Deploying files: %s", files) stack_args = { 'stack_name': stack_name, 'template': template, 'parameters': parameters, 'environment': env, 'files': files } if timeout: stack_args['timeout_mins'] = timeout if stack is None: self.log.info("Performing Heat stack create") orchestration_client.stacks.create(**stack_args) else: self.log.info("Performing Heat stack update") # Make sure existing parameters for stack are reused stack_args['existing'] = 'true' orchestration_client.stacks.update(stack.id, **stack_args) create_result = utils.wait_for_stack_ready( orchestration_client, stack_name) if not create_result: if stack is None: raise Exception("Heat Stack create failed.") else: raise Exception("Heat Stack update failed.")
[ "def", "_heat_deploy", "(", "self", ",", "stack", ",", "stack_name", ",", "template_path", ",", "parameters", ",", "environments", ",", "timeout", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Processing environment files\"", ")", "env_files", ",", "env", "=", "(", "template_utils", ".", "process_multiple_environments_and_files", "(", "environments", ")", ")", "self", ".", "log", ".", "debug", "(", "\"Getting template contents\"", ")", "template_files", ",", "template", "=", "template_utils", ".", "get_template_contents", "(", "template_path", ")", "files", "=", "dict", "(", "list", "(", "template_files", ".", "items", "(", ")", ")", "+", "list", "(", "env_files", ".", "items", "(", ")", ")", ")", "clients", "=", "self", ".", "app", ".", "client_manager", "orchestration_client", "=", "clients", ".", "rdomanager_oscplugin", ".", "orchestration", "(", ")", "self", ".", "log", ".", "debug", "(", "\"Deploying stack: %s\"", ",", "stack_name", ")", "self", ".", "log", ".", "debug", "(", "\"Deploying template: %s\"", ",", "template", ")", "self", ".", "log", ".", "debug", "(", "\"Deploying parameters: %s\"", ",", "parameters", ")", "self", ".", "log", ".", "debug", "(", "\"Deploying environment: %s\"", ",", "env", ")", "self", ".", "log", ".", "debug", "(", "\"Deploying files: %s\"", ",", "files", ")", "stack_args", "=", "{", "'stack_name'", ":", "stack_name", ",", "'template'", ":", "template", ",", "'parameters'", ":", "parameters", ",", "'environment'", ":", "env", ",", "'files'", ":", "files", "}", "if", "timeout", ":", "stack_args", "[", "'timeout_mins'", "]", "=", "timeout", "if", "stack", "is", "None", ":", "self", ".", "log", ".", "info", "(", "\"Performing Heat stack create\"", ")", "orchestration_client", ".", "stacks", ".", "create", "(", "*", "*", "stack_args", ")", "else", ":", "self", ".", "log", ".", "info", "(", "\"Performing Heat stack update\"", ")", "# Make sure existing parameters for stack are reused", "stack_args", "[", "'existing'", "]", "=", "'true'", "orchestration_client", ".", "stacks", ".", "update", "(", "stack", ".", "id", ",", "*", "*", "stack_args", ")", "create_result", "=", "utils", ".", "wait_for_stack_ready", "(", "orchestration_client", ",", "stack_name", ")", "if", "not", "create_result", ":", "if", "stack", "is", "None", ":", "raise", "Exception", "(", "\"Heat Stack create failed.\"", ")", "else", ":", "raise", "Exception", "(", "\"Heat Stack update failed.\"", ")" ]
Verify the Baremetal nodes are available and do a stack update
[ "Verify", "the", "Baremetal", "nodes", "are", "available", "and", "do", "a", "stack", "update" ]
165a166fb2e5a2598380779b35812b8b8478c4fb
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L401-L451
244,765
rdo-management/python-rdomanager-oscplugin
rdomanager_oscplugin/v1/overcloud_deploy.py
DeployOvercloud._pre_heat_deploy
def _pre_heat_deploy(self):
    """Setup before the Heat stack create or update has been done."""
    clients = self.app.client_manager
    compute_client = clients.compute

    self.log.debug("Checking hypervisor stats")
    if utils.check_hypervisor_stats(compute_client) is None:
        raise exceptions.DeploymentError(
            "Expected hypervisor stats not met")
    return True
python
def _pre_heat_deploy(self):
    """Setup before the Heat stack create or update has been done."""
    clients = self.app.client_manager
    compute_client = clients.compute

    self.log.debug("Checking hypervisor stats")
    if utils.check_hypervisor_stats(compute_client) is None:
        raise exceptions.DeploymentError(
            "Expected hypervisor stats not met")
    return True
[ "def", "_pre_heat_deploy", "(", "self", ")", ":", "clients", "=", "self", ".", "app", ".", "client_manager", "compute_client", "=", "clients", ".", "compute", "self", ".", "log", ".", "debug", "(", "\"Checking hypervisor stats\"", ")", "if", "utils", ".", "check_hypervisor_stats", "(", "compute_client", ")", "is", "None", ":", "raise", "exceptions", ".", "DeploymentError", "(", "\"Expected hypervisor stats not met\"", ")", "return", "True" ]
Setup before the Heat stack create or update has been done.
[ "Setup", "before", "the", "Heat", "stack", "create", "or", "update", "has", "been", "done", "." ]
165a166fb2e5a2598380779b35812b8b8478c4fb
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L464-L473
244,766
rdo-management/python-rdomanager-oscplugin
rdomanager_oscplugin/v1/overcloud_deploy.py
DeployOvercloud._deploy_tripleo_heat_templates
def _deploy_tripleo_heat_templates(self, stack, parsed_args):
    """Deploy the fixed templates in TripleO Heat Templates"""
    clients = self.app.client_manager
    network_client = clients.network

    parameters = self._update_paramaters(
        parsed_args, network_client, stack)

    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'ControllerCount': 1,
            'ComputeCount': 1,
            'ObjectStorageCount': 0,
            'BlockStorageCount': 0,
            'CephStorageCount': 0,
        }
    )

    tht_root = parsed_args.templates

    print("Deploying templates in the directory {0}".format(
        os.path.abspath(tht_root)))

    self.log.debug("Creating Environment file")
    env_path = utils.create_environment_file()

    if stack is None:
        self.log.debug("Creating Keystone certificates")
        keystone_pki.generate_certs_into_json(env_path, False)

    resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
    environments = [resource_registry_path, env_path]

    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)

    if parsed_args.environment_files:
        environments.extend(parsed_args.environment_files)

    overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)

    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
python
def _deploy_tripleo_heat_templates(self, stack, parsed_args):
    """Deploy the fixed templates in TripleO Heat Templates"""
    clients = self.app.client_manager
    network_client = clients.network

    parameters = self._update_paramaters(
        parsed_args, network_client, stack)

    utils.check_nodes_count(
        self.app.client_manager.rdomanager_oscplugin.baremetal(),
        stack,
        parameters,
        {
            'ControllerCount': 1,
            'ComputeCount': 1,
            'ObjectStorageCount': 0,
            'BlockStorageCount': 0,
            'CephStorageCount': 0,
        }
    )

    tht_root = parsed_args.templates

    print("Deploying templates in the directory {0}".format(
        os.path.abspath(tht_root)))

    self.log.debug("Creating Environment file")
    env_path = utils.create_environment_file()

    if stack is None:
        self.log.debug("Creating Keystone certificates")
        keystone_pki.generate_certs_into_json(env_path, False)

    resource_registry_path = os.path.join(tht_root, RESOURCE_REGISTRY_NAME)
    environments = [resource_registry_path, env_path]

    if parsed_args.rhel_reg:
        reg_env = self._create_registration_env(parsed_args)
        environments.extend(reg_env)

    if parsed_args.environment_files:
        environments.extend(parsed_args.environment_files)

    overcloud_yaml = os.path.join(tht_root, OVERCLOUD_YAML_NAME)

    self._heat_deploy(stack, parsed_args.stack, overcloud_yaml, parameters,
                      environments, parsed_args.timeout)
[ "def", "_deploy_tripleo_heat_templates", "(", "self", ",", "stack", ",", "parsed_args", ")", ":", "clients", "=", "self", ".", "app", ".", "client_manager", "network_client", "=", "clients", ".", "network", "parameters", "=", "self", ".", "_update_paramaters", "(", "parsed_args", ",", "network_client", ",", "stack", ")", "utils", ".", "check_nodes_count", "(", "self", ".", "app", ".", "client_manager", ".", "rdomanager_oscplugin", ".", "baremetal", "(", ")", ",", "stack", ",", "parameters", ",", "{", "'ControllerCount'", ":", "1", ",", "'ComputeCount'", ":", "1", ",", "'ObjectStorageCount'", ":", "0", ",", "'BlockStorageCount'", ":", "0", ",", "'CephStorageCount'", ":", "0", ",", "}", ")", "tht_root", "=", "parsed_args", ".", "templates", "print", "(", "\"Deploying templates in the directory {0}\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "tht_root", ")", ")", ")", "self", ".", "log", ".", "debug", "(", "\"Creating Environment file\"", ")", "env_path", "=", "utils", ".", "create_environment_file", "(", ")", "if", "stack", "is", "None", ":", "self", ".", "log", ".", "debug", "(", "\"Creating Keystone certificates\"", ")", "keystone_pki", ".", "generate_certs_into_json", "(", "env_path", ",", "False", ")", "resource_registry_path", "=", "os", ".", "path", ".", "join", "(", "tht_root", ",", "RESOURCE_REGISTRY_NAME", ")", "environments", "=", "[", "resource_registry_path", ",", "env_path", "]", "if", "parsed_args", ".", "rhel_reg", ":", "reg_env", "=", "self", ".", "_create_registration_env", "(", "parsed_args", ")", "environments", ".", "extend", "(", "reg_env", ")", "if", "parsed_args", ".", "environment_files", ":", "environments", ".", "extend", "(", "parsed_args", ".", "environment_files", ")", "overcloud_yaml", "=", "os", ".", "path", ".", "join", "(", "tht_root", ",", "OVERCLOUD_YAML_NAME", ")", "self", ".", "_heat_deploy", "(", "stack", ",", "parsed_args", ".", "stack", ",", "overcloud_yaml", ",", "parameters", ",", "environments", ",", "parsed_args", ".", "timeout", ")" ]
Deploy the fixed templates in TripleO Heat Templates
[ "Deploy", "the", "fixed", "templates", "in", "TripleO", "Heat", "Templates" ]
165a166fb2e5a2598380779b35812b8b8478c4fb
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/overcloud_deploy.py#L475-L520
244,767
arve0/leicascanningtemplate
leicascanningtemplate/template.py
ScanningTemplate.well_fields
def well_fields(self, well_x=1, well_y=1):
    """All ScanFieldData elements of given well.

    Parameters
    ----------
    well_x : int
    well_y : int

    Returns
    -------
    list of lxml.objectify.ObjectifiedElement
        All ScanFieldData elements of given well.
    """
    xpath = './ScanFieldArray/ScanFieldData'
    xpath += _xpath_attrib('WellX', well_x)
    xpath += _xpath_attrib('WellY', well_y)
    return self.root.findall(xpath)
python
def well_fields(self, well_x=1, well_y=1):
    """All ScanFieldData elements of given well.

    Parameters
    ----------
    well_x : int
    well_y : int

    Returns
    -------
    list of lxml.objectify.ObjectifiedElement
        All ScanFieldData elements of given well.
    """
    xpath = './ScanFieldArray/ScanFieldData'
    xpath += _xpath_attrib('WellX', well_x)
    xpath += _xpath_attrib('WellY', well_y)
    return self.root.findall(xpath)
[ "def", "well_fields", "(", "self", ",", "well_x", "=", "1", ",", "well_y", "=", "1", ")", ":", "xpath", "=", "'./ScanFieldArray/ScanFieldData'", "xpath", "+=", "_xpath_attrib", "(", "'WellX'", ",", "well_x", ")", "xpath", "+=", "_xpath_attrib", "(", "'WellY'", ",", "well_y", ")", "return", "self", ".", "root", ".", "findall", "(", "xpath", ")" ]
All ScanFieldData elements of given well. Parameters ---------- well_x : int well_y : int Returns ------- list of lxml.objectify.ObjectifiedElement All ScanFieldData elements of given well.
[ "All", "ScanFieldData", "elements", "of", "given", "well", "." ]
053e075d3bed11e335b61ce048c47067b8e9e921
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L59-L75
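A minimal usage sketch for well_fields(). The import path and the constructor taking a template filename are assumptions inferred from this record, not confirmed by it; the filename is illustrative.

# Hypothetical call site for well_fields(); import path, constructor
# signature, and filename are assumptions.
from leicascanningtemplate import ScanningTemplate

tmpl = ScanningTemplate('{ScanningTemplate}demo.xml')
for fld in tmpl.well_fields(well_x=1, well_y=2):
    # each item is an lxml.objectify ScanFieldData element
    print(fld.attrib['FieldX'], fld.attrib['FieldY'])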
244,768
arve0/leicascanningtemplate
leicascanningtemplate/template.py
ScanningTemplate.well
def well(self, well_x=1, well_y=1):
    """ScanWellData of specific well.

    Parameters
    ----------
    well_x : int
    well_y : int

    Returns
    -------
    lxml.objectify.ObjectifiedElement
    """
    xpath = './ScanWellData'
    xpath += _xpath_attrib('WellX', well_x)
    xpath += _xpath_attrib('WellY', well_y)
    # assume we find only one
    return self.well_array.find(xpath)
python
def well(self, well_x=1, well_y=1):
    """ScanWellData of specific well.

    Parameters
    ----------
    well_x : int
    well_y : int

    Returns
    -------
    lxml.objectify.ObjectifiedElement
    """
    xpath = './ScanWellData'
    xpath += _xpath_attrib('WellX', well_x)
    xpath += _xpath_attrib('WellY', well_y)
    # assume we find only one
    return self.well_array.find(xpath)
[ "def", "well", "(", "self", ",", "well_x", "=", "1", ",", "well_y", "=", "1", ")", ":", "xpath", "=", "'./ScanWellData'", "xpath", "+=", "_xpath_attrib", "(", "'WellX'", ",", "well_x", ")", "xpath", "+=", "_xpath_attrib", "(", "'WellY'", ",", "well_y", ")", "# assume we find only one", "return", "self", ".", "well_array", ".", "find", "(", "xpath", ")" ]
ScanWellData of specific well. Parameters ---------- well_x : int well_y : int Returns ------- lxml.objectify.ObjectifiedElement
[ "ScanWellData", "of", "specific", "well", "." ]
053e075d3bed11e335b61ce048c47067b8e9e921
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L78-L94
244,769
arve0/leicascanningtemplate
leicascanningtemplate/template.py
ScanningTemplate.field
def field(self, well_x=1, well_y=1, field_x=1, field_y=1):
    """ScanFieldData of specified field.

    Parameters
    ----------
    well_x : int
    well_y : int
    field_x : int
    field_y : int

    Returns
    -------
    lxml.objectify.ObjectifiedElement
        ScanFieldArray/ScanFieldData element.
    """
    xpath = './ScanFieldArray/ScanFieldData'
    xpath += _xpath_attrib('WellX', well_x)
    xpath += _xpath_attrib('WellY', well_y)
    xpath += _xpath_attrib('FieldX', field_x)
    xpath += _xpath_attrib('FieldY', field_y)
    # assume we find only one
    return self.root.find(xpath)
python
def field(self, well_x=1, well_y=1, field_x=1, field_y=1):
    """ScanFieldData of specified field.

    Parameters
    ----------
    well_x : int
    well_y : int
    field_x : int
    field_y : int

    Returns
    -------
    lxml.objectify.ObjectifiedElement
        ScanFieldArray/ScanFieldData element.
    """
    xpath = './ScanFieldArray/ScanFieldData'
    xpath += _xpath_attrib('WellX', well_x)
    xpath += _xpath_attrib('WellY', well_y)
    xpath += _xpath_attrib('FieldX', field_x)
    xpath += _xpath_attrib('FieldY', field_y)
    # assume we find only one
    return self.root.find(xpath)
[ "def", "field", "(", "self", ",", "well_x", "=", "1", ",", "well_y", "=", "1", ",", "field_x", "=", "1", ",", "field_y", "=", "1", ")", ":", "xpath", "=", "'./ScanFieldArray/ScanFieldData'", "xpath", "+=", "_xpath_attrib", "(", "'WellX'", ",", "well_x", ")", "xpath", "+=", "_xpath_attrib", "(", "'WellY'", ",", "well_y", ")", "xpath", "+=", "_xpath_attrib", "(", "'FieldX'", ",", "field_x", ")", "xpath", "+=", "_xpath_attrib", "(", "'FieldY'", ",", "field_y", ")", "# assume we find only one", "return", "self", ".", "root", ".", "find", "(", "xpath", ")" ]
ScanFieldData of specified field. Parameters ---------- well_x : int well_y : int field_x : int field_y : int Returns ------- lxml.objectify.ObjectifiedElement ScanFieldArray/ScanFieldData element.
[ "ScanFieldData", "of", "specified", "field", "." ]
053e075d3bed11e335b61ce048c47067b8e9e921
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L134-L155
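A standalone sketch of the XPath composition pattern used by field() and its siblings. The predicate format produced by _xpath_attrib is an assumption (the helper's body is not shown in this record); the stand-in below renders the common [@Name="value"] form.

# Hypothetical stand-in for the module's _xpath_attrib helper.
def xpath_attrib(attrib, value):
    return '[@{0}="{1}"]'.format(attrib, value)

xpath = './ScanFieldArray/ScanFieldData'
for name, val in (('WellX', 1), ('WellY', 1), ('FieldX', 2), ('FieldY', 3)):
    xpath += xpath_attrib(name, val)
print(xpath)
# ./ScanFieldArray/ScanFieldData[@WellX="1"][@WellY="1"][@FieldX="2"][@FieldY="3"]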
244,770
arve0/leicascanningtemplate
leicascanningtemplate/template.py
ScanningTemplate.update_start_position
def update_start_position(self):
    "Set start position of experiment to position of first field."
    x_start = self.field_array.ScanFieldData.FieldXCoordinate
    y_start = self.field_array.ScanFieldData.FieldYCoordinate

    # an empty template has all field positions set to zero
    # --> avoid overwriting start position
    if x_start != 0 and y_start != 0:
        self.properties.ScanFieldStageStartPositionX = int(x_start * 1e6)  # in um
        self.properties.ScanFieldStageStartPositionY = int(y_start * 1e6)
python
def update_start_position(self):
    "Set start position of experiment to position of first field."
    x_start = self.field_array.ScanFieldData.FieldXCoordinate
    y_start = self.field_array.ScanFieldData.FieldYCoordinate

    # an empty template has all field positions set to zero
    # --> avoid overwriting start position
    if x_start != 0 and y_start != 0:
        self.properties.ScanFieldStageStartPositionX = int(x_start * 1e6)  # in um
        self.properties.ScanFieldStageStartPositionY = int(y_start * 1e6)
[ "def", "update_start_position", "(", "self", ")", ":", "x_start", "=", "self", ".", "field_array", ".", "ScanFieldData", ".", "FieldXCoordinate", "y_start", "=", "self", ".", "field_array", ".", "ScanFieldData", ".", "FieldYCoordinate", "# an empty template has all field positions set to zero", "# --> avoid overwriting start position", "if", "x_start", "!=", "0", "and", "y_start", "!=", "0", ":", "self", ".", "properties", ".", "ScanFieldStageStartPositionX", "=", "int", "(", "x_start", "*", "1e6", ")", "# in um", "self", ".", "properties", ".", "ScanFieldStageStartPositionY", "=", "int", "(", "y_start", "*", "1e6", ")" ]
Set start position of experiment to position of first field.
[ "Set", "start", "position", "of", "experiment", "to", "position", "of", "first", "field", "." ]
053e075d3bed11e335b61ce048c47067b8e9e921
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L158-L167
244,771
arve0/leicascanningtemplate
leicascanningtemplate/template.py
ScanningTemplate.update_counts
def update_counts(self):
    "Update counts of fields and wells."
    # Properties.attrib['TotalCountOfFields']
    fields = str(len(self.fields))
    self.properties.attrib['TotalCountOfFields'] = fields

    # Properties.CountOfWellsX/Y
    wx, wy = (str(x) for x in self.count_of_wells)
    self.properties.CountOfWellsX = wx
    self.properties.CountOfWellsY = wy

    # Properties.attrib['TotalCountOfWells']
    wells = str(len(self.wells))
    self.properties.attrib['TotalCountOfWells'] = wells

    # Properties.attrib['TotalAssignedJobs']
    self.properties.attrib['TotalAssignedJobs'] = str(self.count_of_assigned_jobs)
python
def update_counts(self):
    "Update counts of fields and wells."
    # Properties.attrib['TotalCountOfFields']
    fields = str(len(self.fields))
    self.properties.attrib['TotalCountOfFields'] = fields

    # Properties.CountOfWellsX/Y
    wx, wy = (str(x) for x in self.count_of_wells)
    self.properties.CountOfWellsX = wx
    self.properties.CountOfWellsY = wy

    # Properties.attrib['TotalCountOfWells']
    wells = str(len(self.wells))
    self.properties.attrib['TotalCountOfWells'] = wells

    # Properties.attrib['TotalAssignedJobs']
    self.properties.attrib['TotalAssignedJobs'] = str(self.count_of_assigned_jobs)
[ "def", "update_counts", "(", "self", ")", ":", "# Properties.attrib['TotalCountOfFields']", "fields", "=", "str", "(", "len", "(", "self", ".", "fields", ")", ")", "self", ".", "properties", ".", "attrib", "[", "'TotalCountOfFields'", "]", "=", "fields", "# Properties.CountOfWellsX/Y", "wx", ",", "wy", "=", "(", "str", "(", "x", ")", "for", "x", "in", "self", ".", "count_of_wells", ")", "self", ".", "properties", ".", "CountOfWellsX", "=", "wx", "self", ".", "properties", ".", "CountOfWellsY", "=", "wy", "# Properties.attrib['TotalCountOfWells']", "wells", "=", "str", "(", "len", "(", "self", ".", "wells", ")", ")", "self", ".", "properties", ".", "attrib", "[", "'TotalCountOfWells'", "]", "=", "wells", "# Properties.attrib['TotalAssignedJobs']", "self", ".", "properties", ".", "attrib", "[", "'TotalAssignedJobs'", "]", "=", "str", "(", "self", ".", "count_of_assigned_jobs", ")" ]
Update counts of fields and wells.
[ "Update", "counts", "of", "fields", "and", "wells", "." ]
053e075d3bed11e335b61ce048c47067b8e9e921
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L210-L227
244,772
arve0/leicascanningtemplate
leicascanningtemplate/template.py
ScanningTemplate.remove_well
def remove_well(self, well_x, well_y):
    """Remove well and associated scan fields.

    Parameters
    ----------
    well_x : int
    well_y : int

    Raises
    ------
    AttributeError
        If well not found.
    """
    well = self.well(well_x, well_y)
    if well == None:
        raise AttributeError('Well not found')
    self.well_array.remove(well)

    # remove associated fields
    fields = self.well_fields(well_x, well_y)
    for f in fields:
        self.field_array.remove(f)
python
def remove_well(self, well_x, well_y):
    """Remove well and associated scan fields.

    Parameters
    ----------
    well_x : int
    well_y : int

    Raises
    ------
    AttributeError
        If well not found.
    """
    well = self.well(well_x, well_y)
    if well == None:
        raise AttributeError('Well not found')
    self.well_array.remove(well)

    # remove associated fields
    fields = self.well_fields(well_x, well_y)
    for f in fields:
        self.field_array.remove(f)
[ "def", "remove_well", "(", "self", ",", "well_x", ",", "well_y", ")", ":", "well", "=", "self", ".", "well", "(", "well_x", ",", "well_y", ")", "if", "well", "==", "None", ":", "raise", "AttributeError", "(", "'Well not found'", ")", "self", ".", "well_array", ".", "remove", "(", "well", ")", "# remove associated fields", "fields", "=", "self", ".", "well_fields", "(", "well_x", ",", "well_y", ")", "for", "f", "in", "fields", ":", "self", ".", "field_array", ".", "remove", "(", "f", ")" ]
Remove well and associated scan fields. Parameters ---------- well_x : int well_y : int Raises ------ AttributeError If well not found.
[ "Remove", "well", "and", "associated", "scan", "fields", "." ]
053e075d3bed11e335b61ce048c47067b8e9e921
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L230-L251
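A hedged call-site sketch for remove_well(), showing the documented AttributeError path. As above, the import path, constructor, and filename are assumptions.

# Hypothetical usage of remove_well().
from leicascanningtemplate import ScanningTemplate

tmpl = ScanningTemplate('{ScanningTemplate}demo.xml')
try:
    tmpl.remove_well(3, 4)  # also drops that well's ScanFieldData entries
except AttributeError:
    print('no such well, nothing removed')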
244,773
arve0/leicascanningtemplate
leicascanningtemplate/template.py
ScanningTemplate.field_exists
def field_exists(self, well_x, well_y, field_x, field_y):
    "Check if field exists in ScanFieldArray."
    return self.field(well_x, well_y, field_x, field_y) != None
python
def field_exists(self, well_x, well_y, field_x, field_y):
    "Check if field exists in ScanFieldArray."
    return self.field(well_x, well_y, field_x, field_y) != None
[ "def", "field_exists", "(", "self", ",", "well_x", ",", "well_y", ",", "field_x", ",", "field_y", ")", ":", "return", "self", ".", "field", "(", "well_x", ",", "well_y", ",", "field_x", ",", "field_y", ")", "!=", "None" ]
Check if field exists in ScanFieldArray.
[ "Check", "if", "field", "exists", "in", "ScanFieldArray", "." ]
053e075d3bed11e335b61ce048c47067b8e9e921
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L259-L261
244,774
arve0/leicascanningtemplate
leicascanningtemplate/template.py
ScanningTemplate.write
def write(self, filename=None):
    """Save template to xml. Before saving template will update
    date, start position, well positions, and counts.

    Parameters
    ----------
    filename : str
        If not set, XML will be written to self.filename.
    """
    if not filename:
        filename = self.filename

    # update time
    self.properties.CurrentDate = _current_time()

    # set rubber band to true
    self.properties.EnableRubberBand = 'true'

    # update start position
    self.update_start_position()

    # update well positions
    self.update_well_positions()

    # update counts
    self.update_counts()

    # remove py:pytype attributes
    objectify.deannotate(self.root)

    # remove namespaces added by lxml
    for child in self.root.iterchildren():
        etree.cleanup_namespaces(child)

    xml = etree.tostring(self.root, encoding='utf8',
                         xml_declaration=True, pretty_print=True)

    # fix format quirks
    # add carriage return character
    xml = u'\r\n'.join(l.decode(encoding='utf8') for l in xml.splitlines())

    # add space at "end/>" --> "end />"
    xml = re.sub(r'(["a-z])/>', r'\1 />', xml)

    xml = xml.replace("version='1.0' encoding='utf8'", 'version="1.0"')

    with open(filename, 'wb') as f:
        f.write(xml.encode('utf8'))
python
def write(self, filename=None):
    """Save template to xml. Before saving template will update
    date, start position, well positions, and counts.

    Parameters
    ----------
    filename : str
        If not set, XML will be written to self.filename.
    """
    if not filename:
        filename = self.filename

    # update time
    self.properties.CurrentDate = _current_time()

    # set rubber band to true
    self.properties.EnableRubberBand = 'true'

    # update start position
    self.update_start_position()

    # update well positions
    self.update_well_positions()

    # update counts
    self.update_counts()

    # remove py:pytype attributes
    objectify.deannotate(self.root)

    # remove namespaces added by lxml
    for child in self.root.iterchildren():
        etree.cleanup_namespaces(child)

    xml = etree.tostring(self.root, encoding='utf8',
                         xml_declaration=True, pretty_print=True)

    # fix format quirks
    # add carriage return character
    xml = u'\r\n'.join(l.decode(encoding='utf8') for l in xml.splitlines())

    # add space at "end/>" --> "end />"
    xml = re.sub(r'(["a-z])/>', r'\1 />', xml)

    xml = xml.replace("version='1.0' encoding='utf8'", 'version="1.0"')

    with open(filename, 'wb') as f:
        f.write(xml.encode('utf8'))
[ "def", "write", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "not", "filename", ":", "filename", "=", "self", ".", "filename", "# update time", "self", ".", "properties", ".", "CurrentDate", "=", "_current_time", "(", ")", "# set rubber band to true", "self", ".", "properties", ".", "EnableRubberBand", "=", "'true'", "# update start position", "self", ".", "update_start_position", "(", ")", "# update well positions", "self", ".", "update_well_positions", "(", ")", "# update counts", "self", ".", "update_counts", "(", ")", "# remove py:pytype attributes", "objectify", ".", "deannotate", "(", "self", ".", "root", ")", "# remove namespaces added by lxml", "for", "child", "in", "self", ".", "root", ".", "iterchildren", "(", ")", ":", "etree", ".", "cleanup_namespaces", "(", "child", ")", "xml", "=", "etree", ".", "tostring", "(", "self", ".", "root", ",", "encoding", "=", "'utf8'", ",", "xml_declaration", "=", "True", ",", "pretty_print", "=", "True", ")", "# fix format quirks", "# add carriage return character", "xml", "=", "u'\\r\\n'", ".", "join", "(", "l", ".", "decode", "(", "encoding", "=", "'utf8'", ")", "for", "l", "in", "xml", ".", "splitlines", "(", ")", ")", "# add space at \"end/>\" --> \"end />\"", "xml", "=", "re", ".", "sub", "(", "r'([\"a-z])/>'", ",", "r'\\1 />'", ",", "xml", ")", "xml", "=", "xml", ".", "replace", "(", "\"version='1.0' encoding='utf8'\"", ",", "'version=\"1.0\"'", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "xml", ".", "encode", "(", "'utf8'", ")", ")" ]
Save template to xml. Before saving template will update date, start position, well positions, and counts. Parameters ---------- filename : str If not set, XML will be written to self.filename.
[ "Save", "template", "to", "xml", ".", "Before", "saving", "template", "will", "update", "date", "start", "position", "well", "positions", "and", "counts", "." ]
053e075d3bed11e335b61ce048c47067b8e9e921
https://github.com/arve0/leicascanningtemplate/blob/053e075d3bed11e335b61ce048c47067b8e9e921/leicascanningtemplate/template.py#L370-L415
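A load-edit-save round trip tying the methods above together. Constructor and filenames are assumptions, as in the earlier sketches.

# Hypothetical round trip: load, modify, write.
from leicascanningtemplate import ScanningTemplate

tmpl = ScanningTemplate('{ScanningTemplate}demo.xml')
tmpl.remove_well(2, 1)
# write() refreshes date, start position, well positions, and counts,
# then serializes with the CRLF / self-closing-tag quirks shown above
tmpl.write('{ScanningTemplate}edited.xml')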
244,775
mbodenhamer/syn
syn/base/a/meta.py
preserve_attr_data
def preserve_attr_data(A, B):
    '''Preserve attr data for combining B into A.
    '''
    for attr, B_data in B.items():  # defined object attrs
        if getattr(B_data, 'override_parent', True):
            continue
        if attr in A:
            A_data = A[attr]
            for _attr in getattr(A_data, '_attrs', []):  # Attr attrs, like type, default, & doc
                if hasattr(A_data, _attr):
                    if getattr(B_data, _attr, None) is not None:
                        if _attr in getattr(B_data, '_set_by_default', []):
                            setattr(B_data, _attr, getattr(A_data, _attr))
                    else:
                        setattr(B_data, _attr, getattr(A_data, _attr))
python
def preserve_attr_data(A, B):
    '''Preserve attr data for combining B into A.
    '''
    for attr, B_data in B.items():  # defined object attrs
        if getattr(B_data, 'override_parent', True):
            continue
        if attr in A:
            A_data = A[attr]
            for _attr in getattr(A_data, '_attrs', []):  # Attr attrs, like type, default, & doc
                if hasattr(A_data, _attr):
                    if getattr(B_data, _attr, None) is not None:
                        if _attr in getattr(B_data, '_set_by_default', []):
                            setattr(B_data, _attr, getattr(A_data, _attr))
                    else:
                        setattr(B_data, _attr, getattr(A_data, _attr))
[ "def", "preserve_attr_data", "(", "A", ",", "B", ")", ":", "for", "attr", ",", "B_data", "in", "B", ".", "items", "(", ")", ":", "# defined object attrs", "if", "getattr", "(", "B_data", ",", "'override_parent'", ",", "True", ")", ":", "continue", "if", "attr", "in", "A", ":", "A_data", "=", "A", "[", "attr", "]", "for", "_attr", "in", "getattr", "(", "A_data", ",", "'_attrs'", ",", "[", "]", ")", ":", "# Attr attrs, like type, default, & doc", "if", "hasattr", "(", "A_data", ",", "_attr", ")", ":", "if", "getattr", "(", "B_data", ",", "_attr", ",", "None", ")", "is", "not", "None", ":", "if", "_attr", "in", "getattr", "(", "B_data", ",", "'_set_by_default'", ",", "[", "]", ")", ":", "setattr", "(", "B_data", ",", "_attr", ",", "getattr", "(", "A_data", ",", "_attr", ")", ")", "else", ":", "setattr", "(", "B_data", ",", "_attr", ",", "getattr", "(", "A_data", ",", "_attr", ")", ")" ]
Preserve attr data for combining B into A.
[ "Preserve", "attr", "data", "for", "combining", "B", "into", "A", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base/a/meta.py#L13-L27
244,776
mbodenhamer/syn
syn/base/a/meta.py
graft
def graft(coll, branch, index):
    '''Graft list branch into coll at index
    '''
    pre = coll[:index]
    post = coll[index:]
    ret = pre + branch + post
    return ret
python
def graft(coll, branch, index):
    '''Graft list branch into coll at index
    '''
    pre = coll[:index]
    post = coll[index:]
    ret = pre + branch + post
    return ret
[ "def", "graft", "(", "coll", ",", "branch", ",", "index", ")", ":", "pre", "=", "coll", "[", ":", "index", "]", "post", "=", "coll", "[", "index", ":", "]", "ret", "=", "pre", "+", "branch", "+", "post", "return", "ret" ]
Graft list branch into coll at index
[ "Graft", "list", "branch", "into", "coll", "at", "index" ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base/a/meta.py#L29-L35
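Since graft() is pure list splicing with no dependencies, a two-line check shows the semantics; the function below is an equivalent condensed re-implementation for illustration.

def graft(coll, branch, index):
    # splice branch into coll just before position index
    return coll[:index] + branch + coll[index:]

print(graft([1, 2, 5, 6], [3, 4], 2))  # [1, 2, 3, 4, 5, 6]
print(graft(['a'], ['b', 'c'], 0))     # ['b', 'c', 'a']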
244,777
mbodenhamer/syn
syn/base/a/meta.py
metaclasses
def metaclasses(bases):
    ''' Returns 'proper' metaclasses for the classes in bases
    '''
    ret = []
    metas = [type(base) for base in bases]
    for k,meta in enumerate(metas):
        if not any(issubclass(m, meta) for m in metas[k+1:]):
            ret.append(meta)

    if type in ret:
        ret.remove(type)
    return ret
python
def metaclasses(bases):
    ''' Returns 'proper' metaclasses for the classes in bases
    '''
    ret = []
    metas = [type(base) for base in bases]
    for k,meta in enumerate(metas):
        if not any(issubclass(m, meta) for m in metas[k+1:]):
            ret.append(meta)

    if type in ret:
        ret.remove(type)
    return ret
[ "def", "metaclasses", "(", "bases", ")", ":", "ret", "=", "[", "]", "metas", "=", "[", "type", "(", "base", ")", "for", "base", "in", "bases", "]", "for", "k", ",", "meta", "in", "enumerate", "(", "metas", ")", ":", "if", "not", "any", "(", "issubclass", "(", "m", ",", "meta", ")", "for", "m", "in", "metas", "[", "k", "+", "1", ":", "]", ")", ":", "ret", ".", "append", "(", "meta", ")", "if", "type", "in", "ret", ":", "ret", ".", "remove", "(", "type", ")", "return", "ret" ]
Returns 'proper' metaclasses for the classes in bases
[ "Returns", "proper", "metaclasses", "for", "the", "classes", "in", "bases" ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base/a/meta.py#L37-L48
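A self-contained demo of the filtering rule: a metaclass is dropped when a later base's metaclass already subclasses it, and plain type never counts. The function body is copied from the record so the snippet runs standalone; the demo uses Python 3 metaclass syntax even though the repo also targets Python 2.

class MetaA(type): pass
class MetaB(MetaA): pass

class Base1(metaclass=MetaA): pass
class Base2(metaclass=MetaB): pass
class Base3: pass  # metaclass is plain type

def metaclasses(bases):
    ret = []
    metas = [type(base) for base in bases]
    for k, meta in enumerate(metas):
        if not any(issubclass(m, meta) for m in metas[k+1:]):
            ret.append(meta)
    if type in ret:
        ret.remove(type)
    return ret

# MetaA is shadowed by MetaB, and type is removed:
print(metaclasses([Base1, Base2, Base3]))  # [<class 'MetaB'>]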
244,778
mbodenhamer/syn
syn/base/a/meta.py
Meta._combine_attr_fast_update
def _combine_attr_fast_update(self, attr, typ):
    '''Avoids having to call _update for each intermediate base.
    Only works for class attr of type UpdateDict.
    '''
    values = dict(getattr(self, attr, {}))

    for base in self._class_data.bases:
        vals = dict(getattr(base, attr, {}))
        preserve_attr_data(vals, values)
        values = combine(vals, values)

    setattr(self, attr, typ(values))
python
def _combine_attr_fast_update(self, attr, typ):
    '''Avoids having to call _update for each intermediate base.
    Only works for class attr of type UpdateDict.
    '''
    values = dict(getattr(self, attr, {}))

    for base in self._class_data.bases:
        vals = dict(getattr(base, attr, {}))
        preserve_attr_data(vals, values)
        values = combine(vals, values)

    setattr(self, attr, typ(values))
[ "def", "_combine_attr_fast_update", "(", "self", ",", "attr", ",", "typ", ")", ":", "values", "=", "dict", "(", "getattr", "(", "self", ",", "attr", ",", "{", "}", ")", ")", "for", "base", "in", "self", ".", "_class_data", ".", "bases", ":", "vals", "=", "dict", "(", "getattr", "(", "base", ",", "attr", ",", "{", "}", ")", ")", "preserve_attr_data", "(", "vals", ",", "values", ")", "values", "=", "combine", "(", "vals", ",", "values", ")", "setattr", "(", "self", ",", "attr", ",", "typ", "(", "values", ")", ")" ]
Avoids having to call _update for each intermediate base. Only works for class attr of type UpdateDict.
[ "Avoids", "having", "to", "call", "_update", "for", "each", "intermediate", "base", ".", "Only", "works", "for", "class", "attr", "of", "type", "UpdateDict", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base/a/meta.py#L193-L205
244,779
pudo-attic/loadkit
loadkit/cli.py
cli
def cli(ctx, collections, threads, debug):
    """ A configurable data and document processing tool. """
    ctx.obj = {
        'collections': collections,
        'debug': debug,
        'threads': threads
    }
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
python
def cli(ctx, collections, threads, debug):
    """ A configurable data and document processing tool. """
    ctx.obj = {
        'collections': collections,
        'debug': debug,
        'threads': threads
    }
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
[ "def", "cli", "(", "ctx", ",", "collections", ",", "threads", ",", "debug", ")", ":", "ctx", ".", "obj", "=", "{", "'collections'", ":", "collections", ",", "'debug'", ":", "debug", ",", "'threads'", ":", "threads", "}", "if", "debug", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")" ]
A configurable data and document processing tool.
[ "A", "configurable", "data", "and", "document", "processing", "tool", "." ]
1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/cli.py#L50-L60
244,780
pudo-attic/loadkit
loadkit/node.py
resolve_dependencies
def resolve_dependencies(nodes):
    """ Figure out which order the nodes in the graph
    can be executed in to satisfy all requirements. """
    done = set()
    while True:
        if len(done) == len(nodes):
            break
        for node in nodes:
            if node.name not in done:
                match = done.intersection(node.requires)
                if len(match) == len(node.requires):
                    done.add(node.name)
                    yield node
                    break
        else:
            raise ConfigException('Invalid requirements in pipeline!')
python
def resolve_dependencies(nodes):
    """ Figure out which order the nodes in the graph
    can be executed in to satisfy all requirements. """
    done = set()
    while True:
        if len(done) == len(nodes):
            break
        for node in nodes:
            if node.name not in done:
                match = done.intersection(node.requires)
                if len(match) == len(node.requires):
                    done.add(node.name)
                    yield node
                    break
        else:
            raise ConfigException('Invalid requirements in pipeline!')
[ "def", "resolve_dependencies", "(", "nodes", ")", ":", "done", "=", "set", "(", ")", "while", "True", ":", "if", "len", "(", "done", ")", "==", "len", "(", "nodes", ")", ":", "break", "for", "node", "in", "nodes", ":", "if", "node", ".", "name", "not", "in", "done", ":", "match", "=", "done", ".", "intersection", "(", "node", ".", "requires", ")", "if", "len", "(", "match", ")", "==", "len", "(", "node", ".", "requires", ")", ":", "done", ".", "add", "(", "node", ".", "name", ")", "yield", "node", "break", "else", ":", "raise", "ConfigException", "(", "'Invalid requirements in pipeline!'", ")" ]
Figure out which order the nodes in the graph can be executed in to satisfy all requirements.
[ "Figure", "out", "which", "order", "the", "nodes", "in", "the", "graph", "can", "be", "executed", "in", "to", "satisfy", "all", "requirements", "." ]
1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c
https://github.com/pudo-attic/loadkit/blob/1fb17e69e2ffaf3dac4f40b574c3b7afb2198b7c/loadkit/node.py#L9-L24
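A standalone run of the resolver with stub nodes; only name and requires attributes are needed, matching what the function touches. ConfigException is replaced by a plain Exception so the snippet has no loadkit dependency.

from collections import namedtuple

Node = namedtuple('Node', 'name requires')

def resolve_dependencies(nodes):
    # copied from the record above, with Exception standing in
    # for loadkit's ConfigException
    done = set()
    while True:
        if len(done) == len(nodes):
            break
        for node in nodes:
            if node.name not in done:
                match = done.intersection(node.requires)
                if len(match) == len(node.requires):
                    done.add(node.name)
                    yield node
                    break
        else:
            raise Exception('Invalid requirements in pipeline!')

nodes = [Node('transform', {'extract'}),
         Node('load', {'transform'}),
         Node('extract', set())]
print([n.name for n in resolve_dependencies(nodes)])
# ['extract', 'transform', 'load']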
244,781
samastur/pyimagediet
pyimagediet/process.py
read_yaml_configuration
def read_yaml_configuration(filename):
    '''Parse configuration in YAML format into a Python dict

    :param filename: filename of a file with configuration in YAML format
    :return: unprocessed configuration object
    :rtype: dict
    '''
    with open(filename, 'r') as f:
        config = yaml.load(f.read())
    return config
python
def read_yaml_configuration(filename):
    '''Parse configuration in YAML format into a Python dict

    :param filename: filename of a file with configuration in YAML format
    :return: unprocessed configuration object
    :rtype: dict
    '''
    with open(filename, 'r') as f:
        config = yaml.load(f.read())
    return config
[ "def", "read_yaml_configuration", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "config", "=", "yaml", ".", "load", "(", "f", ".", "read", "(", ")", ")", "return", "config" ]
Parse configuration in YAML format into a Python dict :param filename: filename of a file with configuration in YAML format :return: unprocessed configuration object :rtype: dict
[ "Parse", "configuration", "in", "YAML", "format", "into", "a", "Python", "dict" ]
480c6e171577df36e166590b031bc8891b3c9e7b
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L34-L43
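Note that on PyYAML 5.1 and later a bare yaml.load() without a Loader argument emits a warning and can construct arbitrary objects. A safer equivalent, assuming the configuration only uses plain YAML scalars, maps, and lists:

import yaml

def read_yaml_configuration_safe(filename):
    # yaml.safe_load restricts parsing to standard YAML types
    with open(filename, 'r') as f:
        return yaml.safe_load(f)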
244,782
samastur/pyimagediet
pyimagediet/process.py
determine_type
def determine_type(filename):
    '''Determine the file type and return it.'''
    ftype = magic.from_file(filename, mime=True).decode('utf8')
    if ftype == 'text/plain':
        ftype = 'text'
    elif ftype == 'image/svg+xml':
        ftype = 'svg'
    else:
        ftype = ftype.split('/')[1]
    return ftype
python
def determine_type(filename):
    '''Determine the file type and return it.'''
    ftype = magic.from_file(filename, mime=True).decode('utf8')
    if ftype == 'text/plain':
        ftype = 'text'
    elif ftype == 'image/svg+xml':
        ftype = 'svg'
    else:
        ftype = ftype.split('/')[1]
    return ftype
[ "def", "determine_type", "(", "filename", ")", ":", "ftype", "=", "magic", ".", "from_file", "(", "filename", ",", "mime", "=", "True", ")", ".", "decode", "(", "'utf8'", ")", "if", "ftype", "==", "'text/plain'", ":", "ftype", "=", "'text'", "elif", "ftype", "==", "'image/svg+xml'", ":", "ftype", "=", "'svg'", "else", ":", "ftype", "=", "ftype", ".", "split", "(", "'/'", ")", "[", "1", "]", "return", "ftype" ]
Determine the file type and return it.
[ "Determine", "the", "file", "type", "and", "return", "it", "." ]
480c6e171577df36e166590b031bc8891b3c9e7b
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L46-L55
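The MIME-collapsing rule can be exercised without libmagic; the helper below isolates it (the .decode('utf8') in the record suggests an older python-magic that returned bytes, while current versions return str).

def mime_to_ftype(mime):
    # same collapsing rule as determine_type(), minus the magic call
    if mime == 'text/plain':
        return 'text'
    if mime == 'image/svg+xml':
        return 'svg'
    return mime.split('/')[1]

assert mime_to_ftype('text/plain') == 'text'
assert mime_to_ftype('image/svg+xml') == 'svg'
assert mime_to_ftype('image/jpeg') == 'jpeg'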
244,783
samastur/pyimagediet
pyimagediet/process.py
update_configuration
def update_configuration(orig, new):
    '''Update existing configuration with new values. Needed because
    dict.update is shallow and would overwrite nested dicts.

    Function updates sections commands, parameters and pipelines and adds
    any new items listed in updates.

    :param orig: configuration to update
    :type orig: dict
    :param new: new updated values
    :type new: dict
    '''
    dicts = ('commands', 'parameters', 'pipelines')
    for key in dicts:
        if key in new:
            orig[key].update(new[key])

    for key in new:
        if key not in dicts:
            orig[key] = new[key]
python
def update_configuration(orig, new):
    '''Update existing configuration with new values. Needed because
    dict.update is shallow and would overwrite nested dicts.

    Function updates sections commands, parameters and pipelines and adds
    any new items listed in updates.

    :param orig: configuration to update
    :type orig: dict
    :param new: new updated values
    :type new: dict
    '''
    dicts = ('commands', 'parameters', 'pipelines')
    for key in dicts:
        if key in new:
            orig[key].update(new[key])

    for key in new:
        if key not in dicts:
            orig[key] = new[key]
[ "def", "update_configuration", "(", "orig", ",", "new", ")", ":", "dicts", "=", "(", "'commands'", ",", "'parameters'", ",", "'pipelines'", ")", "for", "key", "in", "dicts", ":", "if", "key", "in", "new", ":", "orig", "[", "key", "]", ".", "update", "(", "new", "[", "key", "]", ")", "for", "key", "in", "new", ":", "if", "key", "not", "in", "dicts", ":", "orig", "[", "key", "]", "=", "new", "[", "key", "]" ]
Update existing configuration with new values. Needed because dict.update is shallow and would overwrite nested dicts. Function updates sections commands, parameters and pipelines and adds any new items listed in updates. :param orig: configuration to update :type orig: dict :param new: new updated values :type new: dict
[ "Update", "existing", "configuration", "with", "new", "values", ".", "Needed", "because", "dict", ".", "update", "is", "shallow", "and", "would", "overwrite", "nested", "dicts", "." ]
480c6e171577df36e166590b031bc8891b3c9e7b
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L104-L123
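A small demo of why a plain dict.update is not enough here: nested sections would be replaced wholesale, while update_configuration merges them key by key. The merge logic is inlined so the snippet runs standalone; the command strings are illustrative.

def update_configuration(orig, new):
    # copied from the record above
    dicts = ('commands', 'parameters', 'pipelines')
    for key in dicts:
        if key in new:
            orig[key].update(new[key])
    for key in new:
        if key not in dicts:
            orig[key] = new[key]

orig = {'commands': {'jpeg': 'jpegtran'},
        'parameters': {'jpeg': '-copy none'},
        'pipelines': {'jpeg': ['jpeg']}}
new = {'commands': {'png': 'optipng'}, 'backup': 'bak'}

shallow = dict(orig)
shallow.update(new)
print(shallow['commands'])       # {'png': 'optipng'} -- jpeg entry lost

update_configuration(orig, new)
print(orig['commands'])          # {'jpeg': 'jpegtran', 'png': 'optipng'}
print(orig['backup'])            # 'bak'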
244,784
samastur/pyimagediet
pyimagediet/process.py
check_configuration
def check_configuration(config):
    '''Check if configuration object is not malformed.

    :param config: configuration
    :type config: dict
    :return: is configuration correct?
    :rtype: bool
    '''
    sections = ('commands', 'parameters', 'pipelines')

    # Check all sections are there and contain dicts
    for section in sections:
        if section not in config:
            error_msg = 'Error: Section {0} is missing.'.format(section)
            raise ConfigurationErrorDietException(error_msg)
        if not isinstance(config[section], dict):
            error_msg = 'Error: Section {0} is malformed.'.format(section)
            raise ConfigurationErrorDietException(error_msg)

    # Check every command has a corresponding parameters entry
    commands_cmds = set(list(config['commands'].keys()))
    parameters_cmds = set(list(config['parameters'].keys()))
    if commands_cmds != parameters_cmds:
        error_msg = ('Every command in commands and parameters section has to '
                     'have a corresponding entry in the other section.')
        raise ConfigurationErrorDietException(error_msg)

    # Check pipelines section contains lists as values and each of them only
    # has entries listed in commands section
    for cmd in config['pipelines']:
        pipeline = config['pipelines'][cmd]
        if not isinstance(pipeline, list):
            error_msg = ('Error: Pipeline {0} is malformed. Values have to '
                         'be a list of command names.').format(cmd)
            raise ConfigurationErrorDietException(error_msg)
        for tool in pipeline:
            if tool not in commands_cmds:
                error_msg = ('Error in pipeline {0}. "{1}" cannot be found '
                             'among commands listed in commands '
                             'section').format(cmd, tool)
                raise ConfigurationErrorDietException(error_msg)
python
def check_configuration(config):
    '''Check if configuration object is not malformed.

    :param config: configuration
    :type config: dict
    :return: is configuration correct?
    :rtype: bool
    '''
    sections = ('commands', 'parameters', 'pipelines')

    # Check all sections are there and contain dicts
    for section in sections:
        if section not in config:
            error_msg = 'Error: Section {0} is missing.'.format(section)
            raise ConfigurationErrorDietException(error_msg)
        if not isinstance(config[section], dict):
            error_msg = 'Error: Section {0} is malformed.'.format(section)
            raise ConfigurationErrorDietException(error_msg)

    # Check every command has a corresponding parameters entry
    commands_cmds = set(list(config['commands'].keys()))
    parameters_cmds = set(list(config['parameters'].keys()))
    if commands_cmds != parameters_cmds:
        error_msg = ('Every command in commands and parameters section has to '
                     'have a corresponding entry in the other section.')
        raise ConfigurationErrorDietException(error_msg)

    # Check pipelines section contains lists as values and each of them only
    # has entries listed in commands section
    for cmd in config['pipelines']:
        pipeline = config['pipelines'][cmd]
        if not isinstance(pipeline, list):
            error_msg = ('Error: Pipeline {0} is malformed. Values have to '
                         'be a list of command names.').format(cmd)
            raise ConfigurationErrorDietException(error_msg)
        for tool in pipeline:
            if tool not in commands_cmds:
                error_msg = ('Error in pipeline {0}. "{1}" cannot be found '
                             'among commands listed in commands '
                             'section').format(cmd, tool)
                raise ConfigurationErrorDietException(error_msg)
[ "def", "check_configuration", "(", "config", ")", ":", "sections", "=", "(", "'commands'", ",", "'parameters'", ",", "'pipelines'", ")", "# Check all sections are there and contain dicts", "for", "section", "in", "sections", ":", "if", "section", "not", "in", "config", ":", "error_msg", "=", "'Error: Section {0} is missing.'", ".", "format", "(", "section", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")", "if", "not", "isinstance", "(", "config", "[", "section", "]", ",", "dict", ")", ":", "error_msg", "=", "'Error: Section {0} is malformed.'", ".", "format", "(", "section", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")", "# Check every command has a corresponding parameters entry", "commands_cmds", "=", "set", "(", "list", "(", "config", "[", "'commands'", "]", ".", "keys", "(", ")", ")", ")", "parameters_cmds", "=", "set", "(", "list", "(", "config", "[", "'parameters'", "]", ".", "keys", "(", ")", ")", ")", "if", "commands_cmds", "!=", "parameters_cmds", ":", "error_msg", "=", "(", "'Every command in commands and parameters section has to '", "'have a corresponding entry in the other section.'", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")", "# Check pipelines section contains lists as values and each of them only", "# has entries listed in commands section", "for", "cmd", "in", "config", "[", "'pipelines'", "]", ":", "pipeline", "=", "config", "[", "'pipelines'", "]", "[", "cmd", "]", "if", "not", "isinstance", "(", "pipeline", ",", "list", ")", ":", "error_msg", "=", "(", "'Error: Pipeline {0} is malformed. Values have to '", "'be a list of command names.'", ")", ".", "format", "(", "cmd", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")", "for", "tool", "in", "pipeline", ":", "if", "tool", "not", "in", "commands_cmds", ":", "error_msg", "=", "(", "'Error in pipeline {0}. \"{1}\" cannot be found '", "'among commands listed in commands '", "'section'", ")", ".", "format", "(", "cmd", ",", "tool", ")", "raise", "ConfigurationErrorDietException", "(", "error_msg", ")" ]
Check if configuration object is not malformed. :param config: configuration :type config: dict :return: is configuration correct? :rtype: bool
[ "Check", "if", "configuration", "object", "is", "not", "malformed", "." ]
480c6e171577df36e166590b031bc8891b3c9e7b
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L128-L167
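A minimal configuration that satisfies every check above: each command appears in both commands and parameters, and pipelines reference only known commands. The command strings are illustrative, and check_configuration from the record is assumed to be in scope.

config = {
    'commands': {'optipng': 'optipng {file}'},
    'parameters': {'optipng': '-o7'},
    'pipelines': {'png': ['optipng']},
}
# passes silently; malformed input raises ConfigurationErrorDietException
check_configuration(config)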
244,785
samastur/pyimagediet
pyimagediet/process.py
diet
def diet(filename, configuration):
    '''
    Squeeze files if there is a pipeline defined for them or leave them be
    otherwise.

    :param filename: filename of the file to process
    :param configuration: configuration dict describing commands and pipelines
    :type configuration: dict
    :return: has file changed
    :rtype: bool
    '''
    changed = False

    if not isfile(filename):
        raise NotFileDietException('Passed filename does not point to a file')

    conf = copy.deepcopy(DEFAULT_CONFIG)
    if not configuration.get('parsed'):
        new_config = parse_configuration(configuration)
    else:
        new_config = configuration
    update_configuration(conf, new_config)

    filetype = determine_type(filename)
    squeeze_cmd = conf['pipelines'].get(filetype)
    if squeeze_cmd:
        tmpbackup_ext = 'diet_internal'
        ext = conf.get('backup', tmpbackup_ext)
        backup = backup_file(filename, ext)

        size = os.stat(filename).st_size
        new_size = squeeze(squeeze_cmd, filename, backup)
        if not conf.get('keep_processed', False) and new_size > size:
            copy_if_different(backup, filename)

        # Delete backup, if it was internal
        if not conf.get('backup'):
            os.remove(backup)
        changed = True
    return changed
python
def diet(filename, configuration):
    '''
    Squeeze files if there is a pipeline defined for them or leave them be
    otherwise.

    :param filename: filename of the file to process
    :param configuration: configuration dict describing commands and pipelines
    :type configuration: dict
    :return: has file changed
    :rtype: bool
    '''
    changed = False

    if not isfile(filename):
        raise NotFileDietException('Passed filename does not point to a file')

    conf = copy.deepcopy(DEFAULT_CONFIG)
    if not configuration.get('parsed'):
        new_config = parse_configuration(configuration)
    else:
        new_config = configuration
    update_configuration(conf, new_config)

    filetype = determine_type(filename)
    squeeze_cmd = conf['pipelines'].get(filetype)
    if squeeze_cmd:
        tmpbackup_ext = 'diet_internal'
        ext = conf.get('backup', tmpbackup_ext)
        backup = backup_file(filename, ext)

        size = os.stat(filename).st_size
        new_size = squeeze(squeeze_cmd, filename, backup)
        if not conf.get('keep_processed', False) and new_size > size:
            copy_if_different(backup, filename)

        # Delete backup, if it was internal
        if not conf.get('backup'):
            os.remove(backup)
        changed = True
    return changed
[ "def", "diet", "(", "filename", ",", "configuration", ")", ":", "changed", "=", "False", "if", "not", "isfile", "(", "filename", ")", ":", "raise", "NotFileDietException", "(", "'Passed filename does not point to a file'", ")", "conf", "=", "copy", ".", "deepcopy", "(", "DEFAULT_CONFIG", ")", "if", "not", "configuration", ".", "get", "(", "'parsed'", ")", ":", "new_config", "=", "parse_configuration", "(", "configuration", ")", "else", ":", "new_config", "=", "configuration", "update_configuration", "(", "conf", ",", "new_config", ")", "filetype", "=", "determine_type", "(", "filename", ")", "squeeze_cmd", "=", "conf", "[", "'pipelines'", "]", ".", "get", "(", "filetype", ")", "if", "squeeze_cmd", ":", "tmpbackup_ext", "=", "'diet_internal'", "ext", "=", "conf", ".", "get", "(", "'backup'", ",", "tmpbackup_ext", ")", "backup", "=", "backup_file", "(", "filename", ",", "ext", ")", "size", "=", "os", ".", "stat", "(", "filename", ")", ".", "st_size", "new_size", "=", "squeeze", "(", "squeeze_cmd", ",", "filename", ",", "backup", ")", "if", "not", "conf", ".", "get", "(", "'keep_processed'", ",", "False", ")", "and", "new_size", ">", "size", ":", "copy_if_different", "(", "backup", ",", "filename", ")", "# Delete backup, if it was internal", "if", "not", "conf", ".", "get", "(", "'backup'", ")", ":", "os", ".", "remove", "(", "backup", ")", "changed", "=", "True", "return", "changed" ]
Squeeze files if there is a pipeline defined for them or leave them be otherwise. :param filename: filename of the file to process :param configuration: configuration dict describing commands and pipelines :type configuration: dict :return: has file changed :rtype: bool
[ "Squeeze", "files", "if", "there", "is", "a", "pipeline", "defined", "for", "them", "or", "leave", "them", "be", "otherwise", "." ]
480c6e171577df36e166590b031bc8891b3c9e7b
https://github.com/samastur/pyimagediet/blob/480c6e171577df36e166590b031bc8891b3c9e7b/pyimagediet/process.py#L220-L262
244,786
sassoo/goldman
goldman/request.py
Request.auth_scheme
def auth_scheme(self):
    """ If an Authorization header is present get the scheme

    It is expected to be the first string in a space
    separated list & will always be returned lowercase.

    :return: str or None
    """
    try:
        auth = getattr(self, 'auth')
        return naked(auth.split(' ')[0]).lower()
    except (AttributeError, IndexError):
        return None
python
def auth_scheme(self):
    """ If an Authorization header is present get the scheme

    It is expected to be the first string in a space
    separated list & will always be returned lowercase.

    :return: str or None
    """
    try:
        auth = getattr(self, 'auth')
        return naked(auth.split(' ')[0]).lower()
    except (AttributeError, IndexError):
        return None
[ "def", "auth_scheme", "(", "self", ")", ":", "try", ":", "auth", "=", "getattr", "(", "self", ",", "'auth'", ")", "return", "naked", "(", "auth", ".", "split", "(", "' '", ")", "[", "0", "]", ")", ".", "lower", "(", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "return", "None" ]
If an Authorization header is present get the scheme It is expected to be the first string in a space separated list & will always be returned lowercase. :return: str or None
[ "If", "an", "Authorization", "header", "is", "present", "get", "the", "scheme" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/request.py#L45-L58
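The scheme extraction, shown standalone. goldman's naked() helper is not defined in this record; str.strip() stands in for it here on the assumption that it trims surrounding whitespace.

def auth_scheme(auth_header):
    # same logic as the property above, for a raw header value
    try:
        return auth_header.split(' ')[0].strip().lower()
    except (AttributeError, IndexError):
        return None

print(auth_scheme('Bearer abc.def.ghi'))  # 'bearer'
print(auth_scheme('BASIC dXNlcjpwYXNz'))  # 'basic'
print(auth_scheme(None))                  # None (no header present)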
244,787
sassoo/goldman
goldman/request.py
Request._init_content_type_params
def _init_content_type_params(self):
    """ Return the Content-Type request header parameters

    Convert all of the semi-colon separated parameters into
    a dict of key/vals. If for some stupid reason duplicate
    & conflicting params are present then the last one wins.

    If a particular content-type param is non-compliant by
    not being a simple key=val pair then it is skipped.

    If no content-type header or params are present then
    return an empty dict.

    :return: dict
    """
    ret = {}

    if self.content_type:
        params = self.content_type.split(';')[1:]

        for param in params:
            try:
                key, val = param.split('=')
                ret[naked(key)] = naked(val)
            except ValueError:
                continue

    return ret
python
def _init_content_type_params(self):
    """ Return the Content-Type request header parameters

    Convert all of the semi-colon separated parameters into
    a dict of key/vals. If for some stupid reason duplicate
    & conflicting params are present then the last one wins.

    If a particular content-type param is non-compliant by
    not being a simple key=val pair then it is skipped.

    If no content-type header or params are present then
    return an empty dict.

    :return: dict
    """
    ret = {}

    if self.content_type:
        params = self.content_type.split(';')[1:]

        for param in params:
            try:
                key, val = param.split('=')
                ret[naked(key)] = naked(val)
            except ValueError:
                continue

    return ret
[ "def", "_init_content_type_params", "(", "self", ")", ":", "ret", "=", "{", "}", "if", "self", ".", "content_type", ":", "params", "=", "self", ".", "content_type", ".", "split", "(", "';'", ")", "[", "1", ":", "]", "for", "param", "in", "params", ":", "try", ":", "key", ",", "val", "=", "param", ".", "split", "(", "'='", ")", "ret", "[", "naked", "(", "key", ")", "]", "=", "naked", "(", "val", ")", "except", "ValueError", ":", "continue", "return", "ret" ]
Return the Content-Type request header parameters Convert all of the semi-colon separated parameters into a dict of key/vals. If for some stupid reason duplicate & conflicting params are present then the last one wins. If a particular content-type param is non-compliant by not being a simple key=val pair then it is skipped. If no content-type header or params are present then return an empty dict. :return: dict
[ "Return", "the", "Content", "-", "Type", "request", "header", "parameters" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/request.py#L102-L130
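The same parsing rules applied to a literal header value, as a standalone sketch; str.strip() again stands in for goldman's naked() helper.

def content_type_params(content_type):
    ret = {}
    if content_type:
        for param in content_type.split(';')[1:]:
            try:
                key, val = param.split('=')
                ret[key.strip()] = val.strip()
            except ValueError:
                continue  # skip params that are not simple key=val pairs
    return ret

hdr = 'application/vnd.api+json; charset=utf-8; broken; charset=ascii'
print(content_type_params(hdr))
# {'charset': 'ascii'} -- 'broken' is skipped, the last duplicate wins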
244,788
narfman0/helga-markovify
helga_markovify/plugin.py
_handle_match
def _handle_match(client, channel, nick, message, matches):
    """ Match stores all channel info. If helga is asked something to
    stimulate a markov response about channel data, then we shall
    graciously provide it.
    """
    generate_interrogative = _CHANNEL_GENERATE_REGEX.match(message)
    if generate_interrogative:
        return generate(_DEFAULT_TOPIC, _ADD_PUNCTUATION)
    current_topic = db.markovify.find_one({'topic': _DEFAULT_TOPIC})
    if current_topic:
        message = punctuate(current_topic['text'], message, _ADD_PUNCTUATION)
    try:
        ingest(_DEFAULT_TOPIC, message)
    except ValueError as e:
        # not good, but this is done every message so just move along
        print str(e)
python
def _handle_match(client, channel, nick, message, matches):
    """ Match stores all channel info. If helga is asked something to
    stimulate a markov response about channel data, then we shall
    graciously provide it.
    """
    generate_interrogative = _CHANNEL_GENERATE_REGEX.match(message)
    if generate_interrogative:
        return generate(_DEFAULT_TOPIC, _ADD_PUNCTUATION)
    current_topic = db.markovify.find_one({'topic': _DEFAULT_TOPIC})
    if current_topic:
        message = punctuate(current_topic['text'], message, _ADD_PUNCTUATION)
    try:
        ingest(_DEFAULT_TOPIC, message)
    except ValueError as e:
        # not good, but this is done every message so just move along
        print str(e)
[ "def", "_handle_match", "(", "client", ",", "channel", ",", "nick", ",", "message", ",", "matches", ")", ":", "generate_interrogative", "=", "_CHANNEL_GENERATE_REGEX", ".", "match", "(", "message", ")", "if", "generate_interrogative", ":", "return", "generate", "(", "_DEFAULT_TOPIC", ",", "_ADD_PUNCTUATION", ")", "current_topic", "=", "db", ".", "markovify", ".", "find_one", "(", "{", "'topic'", ":", "_DEFAULT_TOPIC", "}", ")", "if", "current_topic", ":", "message", "=", "punctuate", "(", "current_topic", "[", "'text'", "]", ",", "message", ",", "_ADD_PUNCTUATION", ")", "try", ":", "ingest", "(", "_DEFAULT_TOPIC", ",", "message", ")", "except", "ValueError", "as", "e", ":", "# not good, but this is done every message so just move along", "print", "str", "(", "e", ")" ]
Match stores all channel info. If helga is asked something to stimulate a markov response about channel data, then we shall graciously provide it.
[ "Match", "stores", "all", "channel", "info", ".", "If", "helga", "is", "asked", "something", "to", "stimulate", "a", "markov", "response", "about", "channel", "data", "then", "we", "shall", "graciously", "provide", "it", "." ]
b5a82de070102e6da1fd3f5f81cad12d0a9185d8
https://github.com/narfman0/helga-markovify/blob/b5a82de070102e6da1fd3f5f81cad12d0a9185d8/helga_markovify/plugin.py#L70-L85
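The flow above, reduced to its pattern. generate(), ingest() and the trigger regex are helga_markovify internals only partially visible in this record, so the names below are stand-ins; note also that the original's `print str(e)` is Python 2 syntax:

import re

_GENERATE_REGEX = re.compile(r'!speak\b')  # hypothetical trigger

def handle_message(message, generate, ingest, topic='all'):
    if _GENERATE_REGEX.match(message):
        return generate(topic)   # asked to speak: emit a markov sentence
    try:
        ingest(topic, message)   # otherwise feed the message to the model
    except ValueError:
        pass                     # runs on every message, so fail quietly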
244,789
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/api.py
require_root
def require_root(fn): """ Decorator to make sure, that user is root. """ @wraps(fn) def xex(*args, **kwargs): assert os.geteuid() == 0, \ "You have to be root to run function '%s'." % fn.__name__ return fn(*args, **kwargs) return xex
python
def require_root(fn): """ Decorator to make sure, that user is root. """ @wraps(fn) def xex(*args, **kwargs): assert os.geteuid() == 0, \ "You have to be root to run function '%s'." % fn.__name__ return fn(*args, **kwargs) return xex
[ "def", "require_root", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "xex", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "os", ".", "geteuid", "(", ")", "==", "0", ",", "\"You have to be root to run function '%s'.\"", "%", "fn", ".", "__name__", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "xex" ]
Decorator to make sure, that user is root.
[ "Decorator", "to", "make", "sure", "that", "user", "is", "root", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/api.py#L37-L47
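A usage sketch for require_root; the decorator is restated so the snippet is self-contained. Calling the wrapped function as a non-root user raises AssertionError (the decorated body below is a placeholder, not from the source):

import os
from functools import wraps

def require_root(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        assert os.geteuid() == 0, \
            "You have to be root to run function '%s'." % fn.__name__
        return fn(*args, **kwargs)
    return wrapper

@require_root
def reload_configuration():
    print("reloading proftpd configuration...")  # placeholder body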
244,790
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/api.py
recursive_chmod
def recursive_chmod(path, mode=0755): """ Recursively change ``mode`` for given ``path``. Same as ``chmod -R mode``. Args: path (str): Path of the directory/file. mode (octal int, default 0755): New mode of the file. Warning: Don't forget to add ``0`` at the beginning of the numbers of `mode`, or `Unspeakable hOrRoRs` will be awaken from their unholy sleep outside of the reality and they WILL eat your soul (and your files). """ passwd_reader.set_permissions(path, mode=mode) if os.path.isfile(path): return # recursively change mode of all subdirectories for root, dirs, files in os.walk(path): for fn in files + dirs: passwd_reader.set_permissions(os.path.join(root, fn), mode=mode)
python
def recursive_chmod(path, mode=0755): """ Recursively change ``mode`` for given ``path``. Same as ``chmod -R mode``. Args: path (str): Path of the directory/file. mode (octal int, default 0755): New mode of the file. Warning: Don't forget to add ``0`` at the beginning of the numbers of `mode`, or `Unspeakable hOrRoRs` will be awaken from their unholy sleep outside of the reality and they WILL eat your soul (and your files). """ passwd_reader.set_permissions(path, mode=mode) if os.path.isfile(path): return # recursively change mode of all subdirectories for root, dirs, files in os.walk(path): for fn in files + dirs: passwd_reader.set_permissions(os.path.join(root, fn), mode=mode)
[ "def", "recursive_chmod", "(", "path", ",", "mode", "=", "0755", ")", ":", "passwd_reader", ".", "set_permissions", "(", "path", ",", "mode", "=", "mode", ")", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "# recursively change mode of all subdirectories", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "fn", "in", "files", "+", "dirs", ":", "passwd_reader", ".", "set_permissions", "(", "os", ".", "path", ".", "join", "(", "root", ",", "fn", ")", ",", "mode", "=", "mode", ")" ]
Recursively change ``mode`` for given ``path``. Same as ``chmod -R mode``. Args: path (str): Path of the directory/file. mode (octal int, default 0755): New mode of the file. Warning: Don't forget to add ``0`` at the beginning of the numbers of `mode`, or `Unspeakable hOrRoRs` will be awaken from their unholy sleep outside of the reality and they WILL eat your soul (and your files).
[ "Recursively", "change", "mode", "for", "given", "path", ".", "Same", "as", "chmod", "-", "R", "mode", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/api.py#L58-L78
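The 0755/0775 literals in this record are Python 2 octal syntax (Python 3 spells them 0o755). A Python 3 rendering of the same walk, using plain os.chmod in place of the project's passwd_reader.set_permissions helper:

import os

def recursive_chmod_py3(path, mode=0o755):
    os.chmod(path, mode)             # stand-in for passwd_reader.set_permissions
    if os.path.isfile(path):
        return
    # recursively change mode of everything under a directory
    for root, dirs, files in os.walk(path):
        for name in files + dirs:
            os.chmod(os.path.join(root, name), mode)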
244,791
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/api.py
add_user
def add_user(username, password): """ Adds record to passwd-like file for ProFTPD, creates home directory and sets permissions for important files. Args: username (str): User's name. password (str): User's password. """ assert _is_valid_username(username), \ "Invalid format of username '%s'!" % username assert username not in passwd_reader.load_users(), \ "User '%s' is already registered!" % username assert password, "Password is reqired!" # add new user to the proftpd's passwd file home_dir = settings.DATA_PATH + username sh.ftpasswd( passwd=True, # passwd file, not group file name=username, home=home_dir, # chroot in DATA_PATH shell="/bin/false", uid=settings.PROFTPD_USERS_GID, # TODO: parse dynamically? gid=settings.PROFTPD_USERS_GID, stdin=True, # tell ftpasswd to read password from stdin file=settings.LOGIN_FILE, _in=password ) # create home dir if not exists if not os.path.exists(home_dir): os.makedirs(home_dir, 0775) # I am using PROFTPD_USERS_GID (2000) for all our users - this GID # shouldn't be used by other than FTP users! passwd_reader.set_permissions(home_dir, gid=settings.PROFTPD_USERS_GID) passwd_reader.set_permissions(settings.LOGIN_FILE, mode=0600) create_lock_file(home_dir + "/" + settings.LOCK_FILENAME) reload_configuration()
python
def add_user(username, password): """ Adds record to passwd-like file for ProFTPD, creates home directory and sets permissions for important files. Args: username (str): User's name. password (str): User's password. """ assert _is_valid_username(username), \ "Invalid format of username '%s'!" % username assert username not in passwd_reader.load_users(), \ "User '%s' is already registered!" % username assert password, "Password is reqired!" # add new user to the proftpd's passwd file home_dir = settings.DATA_PATH + username sh.ftpasswd( passwd=True, # passwd file, not group file name=username, home=home_dir, # chroot in DATA_PATH shell="/bin/false", uid=settings.PROFTPD_USERS_GID, # TODO: parse dynamically? gid=settings.PROFTPD_USERS_GID, stdin=True, # tell ftpasswd to read password from stdin file=settings.LOGIN_FILE, _in=password ) # create home dir if not exists if not os.path.exists(home_dir): os.makedirs(home_dir, 0775) # I am using PROFTPD_USERS_GID (2000) for all our users - this GID # shouldn't be used by other than FTP users! passwd_reader.set_permissions(home_dir, gid=settings.PROFTPD_USERS_GID) passwd_reader.set_permissions(settings.LOGIN_FILE, mode=0600) create_lock_file(home_dir + "/" + settings.LOCK_FILENAME) reload_configuration()
[ "def", "add_user", "(", "username", ",", "password", ")", ":", "assert", "_is_valid_username", "(", "username", ")", ",", "\"Invalid format of username '%s'!\"", "%", "username", "assert", "username", "not", "in", "passwd_reader", ".", "load_users", "(", ")", ",", "\"User '%s' is already registered!\"", "%", "username", "assert", "password", ",", "\"Password is reqired!\"", "# add new user to the proftpd's passwd file", "home_dir", "=", "settings", ".", "DATA_PATH", "+", "username", "sh", ".", "ftpasswd", "(", "passwd", "=", "True", ",", "# passwd file, not group file", "name", "=", "username", ",", "home", "=", "home_dir", ",", "# chroot in DATA_PATH", "shell", "=", "\"/bin/false\"", ",", "uid", "=", "settings", ".", "PROFTPD_USERS_GID", ",", "# TODO: parse dynamically?", "gid", "=", "settings", ".", "PROFTPD_USERS_GID", ",", "stdin", "=", "True", ",", "# tell ftpasswd to read password from stdin", "file", "=", "settings", ".", "LOGIN_FILE", ",", "_in", "=", "password", ")", "# create home dir if not exists", "if", "not", "os", ".", "path", ".", "exists", "(", "home_dir", ")", ":", "os", ".", "makedirs", "(", "home_dir", ",", "0775", ")", "# I am using PROFTPD_USERS_GID (2000) for all our users - this GID", "# shouldn't be used by other than FTP users!", "passwd_reader", ".", "set_permissions", "(", "home_dir", ",", "gid", "=", "settings", ".", "PROFTPD_USERS_GID", ")", "passwd_reader", ".", "set_permissions", "(", "settings", ".", "LOGIN_FILE", ",", "mode", "=", "0600", ")", "create_lock_file", "(", "home_dir", "+", "\"/\"", "+", "settings", ".", "LOCK_FILENAME", ")", "reload_configuration", "(", ")" ]
Adds record to passwd-like file for ProFTPD, creates home directory and sets permissions for important files. Args: username (str): User's name. password (str): User's password.
[ "Adds", "record", "to", "passwd", "-", "like", "file", "for", "ProFTPD", "creates", "home", "directory", "and", "sets", "permissions", "for", "important", "files", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/api.py#L107-L149
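What the sh.ftpasswd(...) call above amounts to on the command line: the sh module turns keyword arguments into long options and `_in` into stdin. A rough subprocess equivalent, with quoting and error handling simplified (a sketch, not the project's code):

import subprocess

def ftpasswd_add(username, password, home_dir, login_file, gid=2000):
    cmd = ["ftpasswd", "--passwd",     # edit a passwd file, not a group file
           "--name=" + username,
           "--home=" + home_dir,
           "--shell=/bin/false",
           "--uid=%d" % gid, "--gid=%d" % gid,
           "--stdin",                  # read the password from stdin
           "--file=" + login_file]
    subprocess.run(cmd, input=password.encode(), check=True)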
244,792
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/api.py
remove_user
def remove_user(username): """ Remove user, his home directory and so on.. Args: username (str): User's name. """ users = passwd_reader.load_users() assert username in users, "Username '%s' not found!" % username # remove user from passwd file del users[username] passwd_reader.save_users(users) # remove home directory home_dir = settings.DATA_PATH + username if os.path.exists(home_dir): shutil.rmtree(home_dir) reload_configuration()
python
def remove_user(username): """ Remove user, his home directory and so on.. Args: username (str): User's name. """ users = passwd_reader.load_users() assert username in users, "Username '%s' not found!" % username # remove user from passwd file del users[username] passwd_reader.save_users(users) # remove home directory home_dir = settings.DATA_PATH + username if os.path.exists(home_dir): shutil.rmtree(home_dir) reload_configuration()
[ "def", "remove_user", "(", "username", ")", ":", "users", "=", "passwd_reader", ".", "load_users", "(", ")", "assert", "username", "in", "users", ",", "\"Username '%s' not found!\"", "%", "username", "# remove user from passwd file", "del", "users", "[", "username", "]", "passwd_reader", ".", "save_users", "(", "users", ")", "# remove home directory", "home_dir", "=", "settings", ".", "DATA_PATH", "+", "username", "if", "os", ".", "path", ".", "exists", "(", "home_dir", ")", ":", "shutil", ".", "rmtree", "(", "home_dir", ")", "reload_configuration", "(", ")" ]
Remove user, his home directory and so on.. Args: username (str): User's name.
[ "Remove", "user", "his", "home", "directory", "and", "so", "on", ".." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/api.py#L153-L173
244,793
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/api.py
change_password
def change_password(username, new_password): """ Change password for given `username`. Args: username (str): User's name. new_password (str): User's new password. """ assert username in passwd_reader.load_users(),\ "Username '%s' not found!" % username sh.ftpasswd( "--change-password", passwd=True, # passwd file, not group file name=username, stdin=True, # tell ftpasswd to read password from stdin file=settings.LOGIN_FILE, _in=new_password ) reload_configuration()
python
def change_password(username, new_password): """ Change password for given `username`. Args: username (str): User's name. new_password (str): User's new password. """ assert username in passwd_reader.load_users(),\ "Username '%s' not found!" % username sh.ftpasswd( "--change-password", passwd=True, # passwd file, not group file name=username, stdin=True, # tell ftpasswd to read password from stdin file=settings.LOGIN_FILE, _in=new_password ) reload_configuration()
[ "def", "change_password", "(", "username", ",", "new_password", ")", ":", "assert", "username", "in", "passwd_reader", ".", "load_users", "(", ")", ",", "\"Username '%s' not found!\"", "%", "username", "sh", ".", "ftpasswd", "(", "\"--change-password\"", ",", "passwd", "=", "True", ",", "# passwd file, not group file", "name", "=", "username", ",", "stdin", "=", "True", ",", "# tell ftpasswd to read password from stdin", "file", "=", "settings", ".", "LOGIN_FILE", ",", "_in", "=", "new_password", ")", "reload_configuration", "(", ")" ]
Change password for given `username`. Args: username (str): User's name. new_password (str): User's new password.
[ "Change", "password", "for", "given", "username", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/api.py#L177-L197
244,794
crypto101/clarent
clarent/path.py
getDataPath
def getDataPath(_system=thisSystem, _FilePath=FilePath): """Gets an appropriate path for storing some local data, such as TLS credentials. If the path doesn't exist, it is created. """ if _system == "Windows": pathName = "~/Crypto101/" else: pathName = "~/.crypto101/" path = _FilePath(expanduser(pathName)) if not path.exists(): path.makedirs() return path
python
def getDataPath(_system=thisSystem, _FilePath=FilePath): """Gets an appropriate path for storing some local data, such as TLS credentials. If the path doesn't exist, it is created. """ if _system == "Windows": pathName = "~/Crypto101/" else: pathName = "~/.crypto101/" path = _FilePath(expanduser(pathName)) if not path.exists(): path.makedirs() return path
[ "def", "getDataPath", "(", "_system", "=", "thisSystem", ",", "_FilePath", "=", "FilePath", ")", ":", "if", "_system", "==", "\"Windows\"", ":", "pathName", "=", "\"~/Crypto101/\"", "else", ":", "pathName", "=", "\"~/.crypto101/\"", "path", "=", "_FilePath", "(", "expanduser", "(", "pathName", ")", ")", "if", "not", "path", ".", "exists", "(", ")", ":", "path", ".", "makedirs", "(", ")", "return", "path" ]
Gets an appropriate path for storing some local data, such as TLS credentials. If the path doesn't exist, it is created.
[ "Gets", "an", "appropriate", "path", "for", "storing", "some", "local", "data", "such", "as", "TLS", "credentials", "." ]
2b441d7933e85ee948cfb78506b7ca0a32e9588d
https://github.com/crypto101/clarent/blob/2b441d7933e85ee948cfb78506b7ca0a32e9588d/clarent/path.py#L12-L28
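The _system/_FilePath keyword arguments exist for dependency injection, so a test can substitute fakes instead of patching module globals. A sketch (FakeFilePath is invented for illustration; the import follows this record's repo layout):

from clarent.path import getDataPath  # the function shown above

class FakeFilePath(object):
    def __init__(self, path):
        self.path = path
        self.created = False
    def exists(self):
        return False
    def makedirs(self):
        self.created = True

fake = getDataPath(_system="Windows", _FilePath=FakeFilePath)
assert fake.path.endswith("Crypto101/") and fake.created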
244,795
ludeeus/pycfdns
pycfdns/__init__.py
CloudflareUpdater.get_zoneID
def get_zoneID(self, headers, zone): """Get the zone id for the zone.""" zoneIDurl = self.BASE_URL + '?name=' + zone zoneIDrequest = requests.get(zoneIDurl, headers=headers) zoneID = zoneIDrequest.json()['result'][0]['id'] return zoneID
python
def get_zoneID(self, headers, zone): """Get the zone id for the zone.""" zoneIDurl = self.BASE_URL + '?name=' + zone zoneIDrequest = requests.get(zoneIDurl, headers=headers) zoneID = zoneIDrequest.json()['result'][0]['id'] return zoneID
[ "def", "get_zoneID", "(", "self", ",", "headers", ",", "zone", ")", ":", "zoneIDurl", "=", "self", ".", "BASE_URL", "+", "'?name='", "+", "zone", "zoneIDrequest", "=", "requests", ".", "get", "(", "zoneIDurl", ",", "headers", "=", "headers", ")", "zoneID", "=", "zoneIDrequest", ".", "json", "(", ")", "[", "'result'", "]", "[", "0", "]", "[", "'id'", "]", "return", "zoneID" ]
Get the zone id for the zone.
[ "Get", "the", "zone", "id", "for", "the", "zone", "." ]
0fd027be49d67250f85f2398d006a9409a7dae28
https://github.com/ludeeus/pycfdns/blob/0fd027be49d67250f85f2398d006a9409a7dae28/pycfdns/__init__.py#L26-L31
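A standalone sketch of the zone lookup, with the pieces this record leaves implicit filled in: BASE_URL is assumed to be Cloudflare's v4 /zones endpoint, and the headers follow Cloudflare's legacy API-key auth scheme:

import requests

BASE_URL = "https://api.cloudflare.com/client/v4/zones"

def get_zone_id(email, api_key, zone):
    headers = {"X-Auth-Email": email,
               "X-Auth-Key": api_key,
               "Content-Type": "application/json"}
    response = requests.get(BASE_URL + "?name=" + zone, headers=headers)
    return response.json()["result"][0]["id"]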
244,796
ludeeus/pycfdns
pycfdns/__init__.py
CloudflareUpdater.get_recordInfo
def get_recordInfo(self, headers, zoneID, zone, records): """Get the information of the records.""" if 'None' in records: #If ['None'] in record argument, query all. recordQueryEnpoint = '/' + zoneID + '/dns_records&per_page=100' recordUrl = self.BASE_URL + recordQueryEnpoint recordRequest = requests.get(recordUrl, headers=headers) recordResponse = recordRequest.json()['result'] dev = [] num = 0 for value in recordResponse: recordName = recordResponse[num]['name'] dev.append(recordName) num = num + 1 records = dev updateRecords = [] for record in records: if zone in record: recordFullname = record else: recordFullname = record + '.' + zone recordQuery = '/' + zoneID + '/dns_records?name=' + recordFullname recordUrl = self.BASE_URL + recordQuery recordInfoRequest = requests.get(recordUrl, headers=headers) recordInfoResponse = recordInfoRequest.json()['result'][0] recordID = recordInfoResponse['id'] recordType = recordInfoResponse['type'] recordProxy = str(recordInfoResponse['proxied']) recordContent = recordInfoResponse['content'] if recordProxy == 'True': recordProxied = True else: recordProxied = False updateRecords.append([recordID, recordFullname, recordType, recordContent, recordProxied]) return updateRecords
python
def get_recordInfo(self, headers, zoneID, zone, records): """Get the information of the records.""" if 'None' in records: #If ['None'] in record argument, query all. recordQueryEnpoint = '/' + zoneID + '/dns_records&per_page=100' recordUrl = self.BASE_URL + recordQueryEnpoint recordRequest = requests.get(recordUrl, headers=headers) recordResponse = recordRequest.json()['result'] dev = [] num = 0 for value in recordResponse: recordName = recordResponse[num]['name'] dev.append(recordName) num = num + 1 records = dev updateRecords = [] for record in records: if zone in record: recordFullname = record else: recordFullname = record + '.' + zone recordQuery = '/' + zoneID + '/dns_records?name=' + recordFullname recordUrl = self.BASE_URL + recordQuery recordInfoRequest = requests.get(recordUrl, headers=headers) recordInfoResponse = recordInfoRequest.json()['result'][0] recordID = recordInfoResponse['id'] recordType = recordInfoResponse['type'] recordProxy = str(recordInfoResponse['proxied']) recordContent = recordInfoResponse['content'] if recordProxy == 'True': recordProxied = True else: recordProxied = False updateRecords.append([recordID, recordFullname, recordType, recordContent, recordProxied]) return updateRecords
[ "def", "get_recordInfo", "(", "self", ",", "headers", ",", "zoneID", ",", "zone", ",", "records", ")", ":", "if", "'None'", "in", "records", ":", "#If ['None'] in record argument, query all.", "recordQueryEnpoint", "=", "'/'", "+", "zoneID", "+", "'/dns_records&per_page=100'", "recordUrl", "=", "self", ".", "BASE_URL", "+", "recordQueryEnpoint", "recordRequest", "=", "requests", ".", "get", "(", "recordUrl", ",", "headers", "=", "headers", ")", "recordResponse", "=", "recordRequest", ".", "json", "(", ")", "[", "'result'", "]", "dev", "=", "[", "]", "num", "=", "0", "for", "value", "in", "recordResponse", ":", "recordName", "=", "recordResponse", "[", "num", "]", "[", "'name'", "]", "dev", ".", "append", "(", "recordName", ")", "num", "=", "num", "+", "1", "records", "=", "dev", "updateRecords", "=", "[", "]", "for", "record", "in", "records", ":", "if", "zone", "in", "record", ":", "recordFullname", "=", "record", "else", ":", "recordFullname", "=", "record", "+", "'.'", "+", "zone", "recordQuery", "=", "'/'", "+", "zoneID", "+", "'/dns_records?name='", "+", "recordFullname", "recordUrl", "=", "self", ".", "BASE_URL", "+", "recordQuery", "recordInfoRequest", "=", "requests", ".", "get", "(", "recordUrl", ",", "headers", "=", "headers", ")", "recordInfoResponse", "=", "recordInfoRequest", ".", "json", "(", ")", "[", "'result'", "]", "[", "0", "]", "recordID", "=", "recordInfoResponse", "[", "'id'", "]", "recordType", "=", "recordInfoResponse", "[", "'type'", "]", "recordProxy", "=", "str", "(", "recordInfoResponse", "[", "'proxied'", "]", ")", "recordContent", "=", "recordInfoResponse", "[", "'content'", "]", "if", "recordProxy", "==", "'True'", ":", "recordProxied", "=", "True", "else", ":", "recordProxied", "=", "False", "updateRecords", ".", "append", "(", "[", "recordID", ",", "recordFullname", ",", "recordType", ",", "recordContent", ",", "recordProxied", "]", ")", "return", "updateRecords" ]
Get the information of the records.
[ "Get", "the", "information", "of", "the", "records", "." ]
0fd027be49d67250f85f2398d006a9409a7dae28
https://github.com/ludeeus/pycfdns/blob/0fd027be49d67250f85f2398d006a9409a7dae28/pycfdns/__init__.py#L33-L67
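One likely bug worth flagging in the query-all branch above: the endpoint string '/dns_records&per_page=100' starts its query with '&' rather than '?'. The intended construction is presumably (zoneID as in the surrounding method):

recordQueryEndpoint = '/' + zoneID + '/dns_records?per_page=100'  # '?' not '&'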
244,797
ludeeus/pycfdns
pycfdns/__init__.py
CloudflareUpdater.update_records
def update_records(self, headers, zoneID, updateRecords): """Update DNS records.""" IP = requests.get(self.GET_EXT_IP_URL).text message = True errorsRecords = [] sucessRecords = [] for record in updateRecords: updateEndpoint = '/' + zoneID + '/dns_records/' + record[0] updateUrl = self.BASE_URL + updateEndpoint data = json.dumps({ 'id': zoneID, 'type': record[2], 'name': record[1], 'content': IP, 'proxied': record[4] }) if record[3] != IP and record[2] == 'A': result = requests.put(updateUrl, headers=headers, data=data).json() if result['success'] == True: sucessRecords.append(record[1]) else: errorsRecords.append(record[1]) if errorsRecords != []: message = ("There was an error updating these records: " + str(errorsRecords) + " , the rest is OK.") else: message = ("These records got updated: " + str(sucessRecords)) return message
python
def update_records(self, headers, zoneID, updateRecords): """Update DNS records.""" IP = requests.get(self.GET_EXT_IP_URL).text message = True errorsRecords = [] sucessRecords = [] for record in updateRecords: updateEndpoint = '/' + zoneID + '/dns_records/' + record[0] updateUrl = self.BASE_URL + updateEndpoint data = json.dumps({ 'id': zoneID, 'type': record[2], 'name': record[1], 'content': IP, 'proxied': record[4] }) if record[3] != IP and record[2] == 'A': result = requests.put(updateUrl, headers=headers, data=data).json() if result['success'] == True: sucessRecords.append(record[1]) else: errorsRecords.append(record[1]) if errorsRecords != []: message = ("There was an error updating these records: " + str(errorsRecords) + " , the rest is OK.") else: message = ("These records got updated: " + str(sucessRecords)) return message
[ "def", "update_records", "(", "self", ",", "headers", ",", "zoneID", ",", "updateRecords", ")", ":", "IP", "=", "requests", ".", "get", "(", "self", ".", "GET_EXT_IP_URL", ")", ".", "text", "message", "=", "True", "errorsRecords", "=", "[", "]", "sucessRecords", "=", "[", "]", "for", "record", "in", "updateRecords", ":", "updateEndpoint", "=", "'/'", "+", "zoneID", "+", "'/dns_records/'", "+", "record", "[", "0", "]", "updateUrl", "=", "self", ".", "BASE_URL", "+", "updateEndpoint", "data", "=", "json", ".", "dumps", "(", "{", "'id'", ":", "zoneID", ",", "'type'", ":", "record", "[", "2", "]", ",", "'name'", ":", "record", "[", "1", "]", ",", "'content'", ":", "IP", ",", "'proxied'", ":", "record", "[", "4", "]", "}", ")", "if", "record", "[", "3", "]", "!=", "IP", "and", "record", "[", "2", "]", "==", "'A'", ":", "result", "=", "requests", ".", "put", "(", "updateUrl", ",", "headers", "=", "headers", ",", "data", "=", "data", ")", ".", "json", "(", ")", "if", "result", "[", "'success'", "]", "==", "True", ":", "sucessRecords", ".", "append", "(", "record", "[", "1", "]", ")", "else", ":", "errorsRecords", ".", "append", "(", "record", "[", "1", "]", ")", "if", "errorsRecords", "!=", "[", "]", ":", "message", "=", "(", "\"There was an error updating these records: \"", "+", "str", "(", "errorsRecords", ")", "+", "\" , the rest is OK.\"", ")", "else", ":", "message", "=", "(", "\"These records got updated: \"", "+", "str", "(", "sucessRecords", ")", ")", "return", "message" ]
Update DNS records.
[ "Update", "DNS", "records", "." ]
0fd027be49d67250f85f2398d006a9409a7dae28
https://github.com/ludeeus/pycfdns/blob/0fd027be49d67250f85f2398d006a9409a7dae28/pycfdns/__init__.py#L69-L98
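Chaining the three CloudflareUpdater steps together. This is a sketch: the class constructor and header construction are not shown in these records, and the credentials below are placeholders:

updater = CloudflareUpdater()
headers = {"X-Auth-Email": "user@example.com",
           "X-Auth-Key": "REDACTED",
           "Content-Type": "application/json"}
zone_id = updater.get_zoneID(headers, "example.com")
records = updater.get_recordInfo(headers, zone_id, "example.com", ["www"])
print(updater.update_records(headers, zone_id, records))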
244,798
20tab/twentytab-utils
twentytab/admin.py
ButtonableModelAdmin.change_view
def change_view(self, request, object_id, form_url='', extra_context={}): """ It adds ButtonLinks and ButtonForms to extra_context used in the change_form template """ extra_context['buttons_link'] = self.buttons_link extra_context['buttons_form'] = self.buttons_form extra_context['button_object_id'] = object_id return super(ButtonableModelAdmin, self).change_view(request, object_id, form_url, extra_context)
python
def change_view(self, request, object_id, form_url='', extra_context={}): """ It adds ButtonLinks and ButtonForms to extra_context used in the change_form template """ extra_context['buttons_link'] = self.buttons_link extra_context['buttons_form'] = self.buttons_form extra_context['button_object_id'] = object_id return super(ButtonableModelAdmin, self).change_view(request, object_id, form_url, extra_context)
[ "def", "change_view", "(", "self", ",", "request", ",", "object_id", ",", "form_url", "=", "''", ",", "extra_context", "=", "{", "}", ")", ":", "extra_context", "[", "'buttons_link'", "]", "=", "self", ".", "buttons_link", "extra_context", "[", "'buttons_form'", "]", "=", "self", ".", "buttons_form", "extra_context", "[", "'button_object_id'", "]", "=", "object_id", "return", "super", "(", "ButtonableModelAdmin", ",", "self", ")", ".", "change_view", "(", "request", ",", "object_id", ",", "form_url", ",", "extra_context", ")" ]
It adds ButtonLinks and ButtonForms to extra_context used in the change_form template
[ "It", "adds", "ButtonLinks", "and", "ButtonForms", "to", "extra_context", "used", "in", "the", "change_form", "template" ]
e02d55b1fd848c8e11ca9b7e97a5916780544d34
https://github.com/20tab/twentytab-utils/blob/e02d55b1fd848c8e11ca9b7e97a5916780544d34/twentytab/admin.py#L61-L68
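An aside on the signature above: extra_context={} is a mutable default argument, and Django's own ModelAdmin.change_view defaults it to None instead. An equivalent, safer method for the same class would be:

def change_view(self, request, object_id, form_url='', extra_context=None):
    extra_context = extra_context or {}
    extra_context['buttons_link'] = self.buttons_link
    extra_context['buttons_form'] = self.buttons_form
    extra_context['button_object_id'] = object_id
    return super(ButtonableModelAdmin, self).change_view(
        request, object_id, form_url, extra_context)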
244,799
rameshg87/pyremotevbox
pyremotevbox/ZSI/fault.py
FaultFromException
def FaultFromException(ex, inheader, tb=None, actor=None): '''Return a Fault object created from a Python exception. <SOAP-ENV:Fault> <faultcode>SOAP-ENV:Server</faultcode> <faultstring>Processing Failure</faultstring> <detail> <ZSI:FaultDetail> <ZSI:string></ZSI:string> <ZSI:trace></ZSI:trace> </ZSI:FaultDetail> </detail> </SOAP-ENV:Fault> ''' tracetext = None if tb: try: lines = '\n'.join(['%s:%d:%s' % (name, line, func) for name, line, func, text in traceback.extract_tb(tb)]) except: pass else: tracetext = lines exceptionName = "" try: exceptionName = ":".join([ex.__module__, ex.__class__.__name__]) except: pass elt = ZSIFaultDetail(string=exceptionName + "\n" + str(ex), trace=tracetext) if inheader: detail, headerdetail = None, elt else: detail, headerdetail = elt, None return Fault(Fault.Server, 'Processing Failure', actor, detail, headerdetail)
python
def FaultFromException(ex, inheader, tb=None, actor=None): '''Return a Fault object created from a Python exception. <SOAP-ENV:Fault> <faultcode>SOAP-ENV:Server</faultcode> <faultstring>Processing Failure</faultstring> <detail> <ZSI:FaultDetail> <ZSI:string></ZSI:string> <ZSI:trace></ZSI:trace> </ZSI:FaultDetail> </detail> </SOAP-ENV:Fault> ''' tracetext = None if tb: try: lines = '\n'.join(['%s:%d:%s' % (name, line, func) for name, line, func, text in traceback.extract_tb(tb)]) except: pass else: tracetext = lines exceptionName = "" try: exceptionName = ":".join([ex.__module__, ex.__class__.__name__]) except: pass elt = ZSIFaultDetail(string=exceptionName + "\n" + str(ex), trace=tracetext) if inheader: detail, headerdetail = None, elt else: detail, headerdetail = elt, None return Fault(Fault.Server, 'Processing Failure', actor, detail, headerdetail)
[ "def", "FaultFromException", "(", "ex", ",", "inheader", ",", "tb", "=", "None", ",", "actor", "=", "None", ")", ":", "tracetext", "=", "None", "if", "tb", ":", "try", ":", "lines", "=", "'\\n'", ".", "join", "(", "[", "'%s:%d:%s'", "%", "(", "name", ",", "line", ",", "func", ")", "for", "name", ",", "line", ",", "func", ",", "text", "in", "traceback", ".", "extract_tb", "(", "tb", ")", "]", ")", "except", ":", "pass", "else", ":", "tracetext", "=", "lines", "exceptionName", "=", "\"\"", "try", ":", "exceptionName", "=", "\":\"", ".", "join", "(", "[", "ex", ".", "__module__", ",", "ex", ".", "__class__", ".", "__name__", "]", ")", "except", ":", "pass", "elt", "=", "ZSIFaultDetail", "(", "string", "=", "exceptionName", "+", "\"\\n\"", "+", "str", "(", "ex", ")", ",", "trace", "=", "tracetext", ")", "if", "inheader", ":", "detail", ",", "headerdetail", "=", "None", ",", "elt", "else", ":", "detail", ",", "headerdetail", "=", "elt", ",", "None", "return", "Fault", "(", "Fault", ".", "Server", ",", "'Processing Failure'", ",", "actor", ",", "detail", ",", "headerdetail", ")" ]
Return a Fault object created from a Python exception. <SOAP-ENV:Fault> <faultcode>SOAP-ENV:Server</faultcode> <faultstring>Processing Failure</faultstring> <detail> <ZSI:FaultDetail> <ZSI:string></ZSI:string> <ZSI:trace></ZSI:trace> </ZSI:FaultDetail> </detail> </SOAP-ENV:Fault>
[ "Return", "a", "Fault", "object", "created", "from", "a", "Python", "exception", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/fault.py#L213-L247
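A usage sketch: converting a caught exception into a SOAP fault. The import path follows this record's repo layout, and sys.exc_info()[2] supplies the traceback that fills ZSI:trace in the fault detail:

import sys
from pyremotevbox.ZSI.fault import FaultFromException

try:
    1 / 0
except Exception as ex:
    fault = FaultFromException(ex, inheader=False, tb=sys.exc_info()[2])
    # fault carries faultcode SOAP-ENV:Server and faultstring
    # 'Processing Failure', with the traceback in ZSI:FaultDetail.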