Dataset schema (field name, dtype, value range):

    id                int32    0 .. 252k
    repo              string   lengths 7 .. 55
    path              string   lengths 4 .. 127
    func_name         string   lengths 1 .. 88
    original_string   string   lengths 75 .. 19.8k
    language          string   1 class
    code              string   lengths 75 .. 19.8k
    code_tokens       list
    docstring         string   lengths 3 .. 17.3k
    docstring_tokens  list
    sha               string   lengths 40 .. 40
    url               string   lengths 87 .. 242
245,000
RonenNess/Fileter
fileter/files_iterator.py
FilesIterator.match_filters
def match_filters(self, path):
    """
    Get filename and return True if file pass all filters and should be processed.

    :param path: path to check.
    :return: True if pass filters, false otherwise.
    """
    # indicate if all required filters were matched
    all_required_match = True

    # iterate over filters to match files
    for filt, ftype in self.__filters:

        # handle "Required" filters:
        if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
            all_required_match = False

        # handle "Include" filters:
        elif ftype == self.FilterType.Include and filt.match(path):
            return True

        # handle "Exclude" filters:
        elif ftype == self.FilterType.Exclude and filt.match(path):
            return False

    # if got here it means we processed all filters, and no include/exclude filter was matched.
    # return if all required were matched
    return all_required_match
python
[ "def", "match_filters", "(", "self", ",", "path", ")", ":", "# indicate if all required filters were matched", "all_required_match", "=", "True", "# iterate over filters to match files", "for", "filt", ",", "ftype", "in", "self", ".", "__filters", ":", "# handle \"Required\" filters:", "if", "all_required_match", "and", "ftype", "==", "self", ".", "FilterType", ".", "Required", "and", "not", "filt", ".", "match", "(", "path", ")", ":", "all_required_match", "=", "False", "# handle \"Include\" filters:", "elif", "ftype", "==", "self", ".", "FilterType", ".", "Include", "and", "filt", ".", "match", "(", "path", ")", ":", "return", "True", "# handle \"Exclude\" filters:", "elif", "ftype", "==", "self", ".", "FilterType", ".", "Exclude", "and", "filt", ".", "match", "(", "path", ")", ":", "return", "False", "# if got here it means we processed all filters, and no include/exclude filter was matched.", "# return if all required were matched", "return", "all_required_match" ]
Get filename and return True if file pass all filters and should be processed. :param path: path to check. :return: True if pass filters, false otherwise.
[ "Get", "filename", "and", "return", "True", "if", "file", "pass", "all", "filters", "and", "should", "be", "processed", "." ]
5372221b4049d5d46a9926573b91af17681c81f3
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L284-L311
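The filter semantics above (all Required filters must match; the first matching Include or Exclude filter short-circuits) can be exercised with a minimal standalone sketch. The glob-based filter class and the sample patterns below are hypothetical stand-ins, not part of Fileter's API:

    import fnmatch

    class GlobFilter:
        """Hypothetical stand-in for a Fileter filter: matches paths against a glob."""
        def __init__(self, pattern):
            self.pattern = pattern

        def match(self, path):
            return fnmatch.fnmatch(path, self.pattern)

    # (filter, type) pairs mirroring self.__filters; plain strings mimic FilterType members
    filters = [
        (GlobFilter('*.py'), 'Required'),      # every processed file must be a .py file
        (GlobFilter('*_test.py'), 'Exclude'),  # test files are skipped outright
    ]

    def match_filters(path):
        all_required_match = True
        for filt, ftype in filters:
            if all_required_match and ftype == 'Required' and not filt.match(path):
                all_required_match = False
            elif ftype == 'Include' and filt.match(path):
                return True
            elif ftype == 'Exclude' and filt.match(path):
                return False
        return all_required_match

    print(match_filters('pkg/module.py'))       # True: required matches, no exclude hit
    print(match_filters('pkg/module_test.py'))  # False: exclude filter matches
    print(match_filters('README.md'))           # False: required .py filter fails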
245,001
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/commands.py
SetPyclassMetaclass
def SetPyclassMetaclass(option, opt, value, parser, *args, **kwargs):
    """set up pyclass metaclass for complexTypes"""
    from pyremotevbox.ZSI.generate.containers import ServiceHeaderContainer,\
        TypecodeContainerBase, TypesHeaderContainer

    TypecodeContainerBase.metaclass = kwargs['metaclass']
    TypesHeaderContainer.imports.append(\
        'from %(module)s import %(metaclass)s' %kwargs
        )
    ServiceHeaderContainer.imports.append(\
        'from %(module)s import %(metaclass)s' %kwargs
        )
python
[ "def", "SetPyclassMetaclass", "(", "option", ",", "opt", ",", "value", ",", "parser", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "pyremotevbox", ".", "ZSI", ".", "generate", ".", "containers", "import", "ServiceHeaderContainer", ",", "TypecodeContainerBase", ",", "TypesHeaderContainer", "TypecodeContainerBase", ".", "metaclass", "=", "kwargs", "[", "'metaclass'", "]", "TypesHeaderContainer", ".", "imports", ".", "append", "(", "'from %(module)s import %(metaclass)s'", "%", "kwargs", ")", "ServiceHeaderContainer", ".", "imports", ".", "append", "(", "'from %(module)s import %(metaclass)s'", "%", "kwargs", ")" ]
set up pyclass metaclass for complexTypes
[ "set", "up", "pyclass", "metaclass", "for", "complexTypes" ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/commands.py#L27-L38
245,002
radjkarl/fancyTools
fancytools/render/GridRender.py
GridRender.averageValues
def averageValues(self):
    """
    return the averaged values in the grid
    """
    assert self.opts['record_density'] and self.opts['method'] == 'sum'
    # dont increase value of partly filled cells (density 0..1):
    filled = self.density > 1
    v = self.values.copy()
    v[filled] /= self.density[filled]
    # ONLY AS OPTION??:
    v[~filled] *= self.density[~filled]
    return v
python
[ "def", "averageValues", "(", "self", ")", ":", "assert", "self", ".", "opts", "[", "'record_density'", "]", "and", "self", ".", "opts", "[", "'method'", "]", "==", "'sum'", "# dont increase value of partly filled cells (density 0..1):", "filled", "=", "self", ".", "density", ">", "1", "v", "=", "self", ".", "values", ".", "copy", "(", ")", "v", "[", "filled", "]", "/=", "self", ".", "density", "[", "filled", "]", "# ONLY AS OPTION??:", "v", "[", "~", "filled", "]", "*=", "self", ".", "density", "[", "~", "filled", "]", "return", "v" ]
return the averaged values in the grid
[ "return", "the", "averaged", "values", "in", "the", "grid" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/render/GridRender.py#L99-L111
245,003
incuna/incuna-auth
incuna_auth/middleware/permission.py
BasePermissionMiddleware.deny_access
def deny_access(self, request, **kwargs):
    """
    Standard failure behaviour.

    Returns HTTP 403 (Forbidden) for non-GET requests. For GET requests,
    returns HTTP 302 (Redirect) pointing at either a URL specified in the
    class's unauthorised_redirect attribute, if one exists, or / if not.

    This version also adds a (translated) message if one is passed in.
    """
    # Raise a 403 for POST/DELETE etc.
    if request.method != 'GET':
        return HttpResponseForbidden()

    # Add a message, if one has been defined.
    message = self.get_access_denied_message(request)
    if message:
        messages.info(request, _(message))

    # Return a HTTP 302 redirect.
    redirect_url = self.get_unauthorised_redirect_url(request)
    return redirect_to_login(request.get_full_path(), login_url=redirect_url)
python
[ "def", "deny_access", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "# Raise a 403 for POST/DELETE etc.", "if", "request", ".", "method", "!=", "'GET'", ":", "return", "HttpResponseForbidden", "(", ")", "# Add a message, if one has been defined.", "message", "=", "self", ".", "get_access_denied_message", "(", "request", ")", "if", "message", ":", "messages", ".", "info", "(", "request", ",", "_", "(", "message", ")", ")", "# Return a HTTP 302 redirect.", "redirect_url", "=", "self", ".", "get_unauthorised_redirect_url", "(", "request", ")", "return", "redirect_to_login", "(", "request", ".", "get_full_path", "(", ")", ",", "login_url", "=", "redirect_url", ")" ]
Standard failure behaviour. Returns HTTP 403 (Forbidden) for non-GET requests. For GET requests, returns HTTP 302 (Redirect) pointing at either a URL specified in the class's unauthorised_redirect attribute, if one exists, or / if not. This version also adds a (translated) message if one is passed in.
[ "Standard", "failure", "behaviour", "." ]
949ccd922da15a4b5de17b9595cc8f5114d5385c
https://github.com/incuna/incuna-auth/blob/949ccd922da15a4b5de17b9595cc8f5114d5385c/incuna_auth/middleware/permission.py#L64-L85
245,004
incuna/incuna-auth
incuna_auth/middleware/permission.py
BasePermissionMiddleware.process_request
def process_request(self, request):
    """
    The actual middleware method, called on all incoming requests.

    This default implementation will ignore the middleware (return None) if
    the conditions specified in is_resource_protected aren't met. If they are,
    it then tests to see if the user should be denied access via the
    denied_access_condition method, and calls deny_access (which implements
    failure behaviour) if so.
    """
    if not self.is_resource_protected(request):
        return

    if self.deny_access_condition(request):
        return self.deny_access(request)
python
[ "def", "process_request", "(", "self", ",", "request", ")", ":", "if", "not", "self", ".", "is_resource_protected", "(", "request", ")", ":", "return", "if", "self", ".", "deny_access_condition", "(", "request", ")", ":", "return", "self", ".", "deny_access", "(", "request", ")" ]
The actual middleware method, called on all incoming requests. This default implementation will ignore the middleware (return None) if the conditions specified in is_resource_protected aren't met. If they are, it then tests to see if the user should be denied access via the denied_access_condition method, and calls deny_access (which implements failure behaviour) if so.
[ "The", "actual", "middleware", "method", "called", "on", "all", "incoming", "requests", "." ]
949ccd922da15a4b5de17b9595cc8f5114d5385c
https://github.com/incuna/incuna-auth/blob/949ccd922da15a4b5de17b9595cc8f5114d5385c/incuna_auth/middleware/permission.py#L87-L100
245,005
ajk8/workdir-python
workdir/__init__.py
as_cwd
def as_cwd():
    """ Use workdir.options.path as a temporary working directory """
    _set_log_level()
    owd = os.getcwd()
    logger.debug('entering working directory: ' + options.path)
    os.chdir(os.path.expanduser(options.path))
    yield
    logger.debug('returning to original directory: ' + owd)
    os.chdir(owd)
python
[ "def", "as_cwd", "(", ")", ":", "_set_log_level", "(", ")", "owd", "=", "os", ".", "getcwd", "(", ")", "logger", ".", "debug", "(", "'entering working directory: '", "+", "options", ".", "path", ")", "os", ".", "chdir", "(", "os", ".", "path", ".", "expanduser", "(", "options", ".", "path", ")", ")", "yield", "logger", ".", "debug", "(", "'returning to original directory: '", "+", "owd", ")", "os", ".", "chdir", "(", "owd", ")" ]
Use workdir.options.path as a temporary working directory
[ "Use", "workdir", ".", "options", ".", "path", "as", "a", "temporary", "working", "directory" ]
44a62f45cefb9a1b834d23191e88340b790a553e
https://github.com/ajk8/workdir-python/blob/44a62f45cefb9a1b834d23191e88340b790a553e/workdir/__init__.py#L38-L46
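Since as_cwd yields mid-function, it is presumably decorated with contextlib.contextmanager in the source module. A minimal usage sketch; the path is hypothetical:

    import workdir

    workdir.options.path = '~/.my-workdir'  # hypothetical location

    with workdir.as_cwd():
        # the current directory is now the (user-expanded) workdir path
        ...
    # the original working directory is restored on exit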
245,006
ajk8/workdir-python
workdir/__init__.py
_gitignore_entry_to_regex
def _gitignore_entry_to_regex(entry):
    """ Take a path that you might find in a .gitignore file and turn it into a regex """
    ret = entry.strip()
    ret = ret.replace('.', '\.')
    ret = ret.replace('*', '.*')
    return ret
python
[ "def", "_gitignore_entry_to_regex", "(", "entry", ")", ":", "ret", "=", "entry", ".", "strip", "(", ")", "ret", "=", "ret", ".", "replace", "(", "'.'", ",", "'\\.'", ")", "ret", "=", "ret", ".", "replace", "(", "'*'", ",", "'.*'", ")", "return", "ret" ]
Take a path that you might find in a .gitignore file and turn it into a regex
[ "Take", "a", "path", "that", "you", "might", "find", "in", "a", ".", "gitignore", "file", "and", "turn", "it", "into", "a", "regex" ]
44a62f45cefb9a1b834d23191e88340b790a553e
https://github.com/ajk8/workdir-python/blob/44a62f45cefb9a1b834d23191e88340b790a553e/workdir/__init__.py#L49-L54
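Note the replacement order above: dots are escaped before `*` is widened to `.*`, so the dot introduced by the glob expansion stays unescaped, as intended. A quick check, importing the private helper (which may not be a stable API):

    from workdir import _gitignore_entry_to_regex

    print(_gitignore_entry_to_regex('*.pyc'))      # -> .*\.pyc
    print(_gitignore_entry_to_regex(' build/ '))   # -> build/  (whitespace stripped)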
245,007
ajk8/workdir-python
workdir/__init__.py
sync
def sync(sourcedir=None, exclude_gitignore_entries=None, exclude_regex_list=None):
    """ Create and populate workdir.options.path, memoized so that it only runs once """
    _set_log_level()
    sourcedir = sourcedir or options.sync_sourcedir or os.getcwd()
    if exclude_gitignore_entries is None:
        exclude_gitignore_entries = options.sync_exclude_gitignore_entries
    exclude_regex_list = exclude_regex_list or copy.copy(options.sync_exclude_regex_list)
    gitignore_path = os.path.join(sourcedir, '.gitignore')
    if exclude_gitignore_entries and os.path.isfile(gitignore_path):
        gitignore_lines = []
        with open(gitignore_path) as gitignore:
            for line in gitignore.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    gitignore_lines.append(_gitignore_entry_to_regex(line))
        exclude_regex_list += gitignore_lines
    dirsync_logger = logging.getLogger('dirsync')
    dirsync_logger.setLevel(logging.INFO if options.debug else logging.FATAL)
    logger.info('syncing {} to {}'.format(sourcedir, options.path))
    logger.debug('excluding {} from sync'.format(exclude_regex_list))
    dirsync.sync(
        sourcedir=sourcedir,
        targetdir=options.path,
        action='sync',
        create=True,
        exclude=exclude_regex_list,
        logger=dirsync_logger
    )
python
[ "def", "sync", "(", "sourcedir", "=", "None", ",", "exclude_gitignore_entries", "=", "None", ",", "exclude_regex_list", "=", "None", ")", ":", "_set_log_level", "(", ")", "sourcedir", "=", "sourcedir", "or", "options", ".", "sync_sourcedir", "or", "os", ".", "getcwd", "(", ")", "if", "exclude_gitignore_entries", "is", "None", ":", "exclude_gitignore_entries", "=", "options", ".", "sync_exclude_gitignore_entries", "exclude_regex_list", "=", "exclude_regex_list", "or", "copy", ".", "copy", "(", "options", ".", "sync_exclude_regex_list", ")", "gitignore_path", "=", "os", ".", "path", ".", "join", "(", "sourcedir", ",", "'.gitignore'", ")", "if", "exclude_gitignore_entries", "and", "os", ".", "path", ".", "isfile", "(", "gitignore_path", ")", ":", "gitignore_lines", "=", "[", "]", "with", "open", "(", "gitignore_path", ")", "as", "gitignore", ":", "for", "line", "in", "gitignore", ".", "readlines", "(", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "and", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "gitignore_lines", ".", "append", "(", "_gitignore_entry_to_regex", "(", "line", ")", ")", "exclude_regex_list", "+=", "gitignore_lines", "dirsync_logger", "=", "logging", ".", "getLogger", "(", "'dirsync'", ")", "dirsync_logger", ".", "setLevel", "(", "logging", ".", "INFO", "if", "options", ".", "debug", "else", "logging", ".", "FATAL", ")", "logger", ".", "info", "(", "'syncing {} to {}'", ".", "format", "(", "sourcedir", ",", "options", ".", "path", ")", ")", "logger", ".", "debug", "(", "'excluding {} from sync'", ".", "format", "(", "exclude_regex_list", ")", ")", "dirsync", ".", "sync", "(", "sourcedir", "=", "sourcedir", ",", "targetdir", "=", "options", ".", "path", ",", "action", "=", "'sync'", ",", "create", "=", "True", ",", "exclude", "=", "exclude_regex_list", ",", "logger", "=", "dirsync_logger", ")" ]
Create and populate workdir.options.path, memoized so that it only runs once
[ "Create", "and", "populate", "workdir", ".", "options", ".", "path", "memoized", "so", "that", "it", "only", "runs", "once" ]
44a62f45cefb9a1b834d23191e88340b790a553e
https://github.com/ajk8/workdir-python/blob/44a62f45cefb9a1b834d23191e88340b790a553e/workdir/__init__.py#L57-L84
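A minimal call sketch, assuming defaults on the options module; the target directory and the exclusion pattern are hypothetical:

    import workdir

    workdir.options.path = '.workdir'              # hypothetical target directory
    workdir.sync(exclude_regex_list=['.*\\.log'])  # .gitignore entries are also excluded
                                                   # when sync_exclude_gitignore_entries is set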
245,008
ajk8/workdir-python
workdir/__init__.py
create
def create():
    """ Create workdir.options.path """
    if not os.path.isdir(options.path):
        logger.info('creating working directory: ' + options.path)
        os.makedirs(options.path)
python
[ "def", "create", "(", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "options", ".", "path", ")", ":", "logger", ".", "info", "(", "'creating working directory: '", "+", "options", ".", "path", ")", "os", ".", "makedirs", "(", "options", ".", "path", ")" ]
Create workdir.options.path
[ "Create", "workdir", ".", "options", ".", "path" ]
44a62f45cefb9a1b834d23191e88340b790a553e
https://github.com/ajk8/workdir-python/blob/44a62f45cefb9a1b834d23191e88340b790a553e/workdir/__init__.py#L87-L91
245,009
ajk8/workdir-python
workdir/__init__.py
clean
def clean():
    """ Remove all of the files contained in workdir.options.path """
    if os.path.isdir(options.path):
        logger.info('cleaning working directory: ' + options.path)
        for filename in os.listdir(options.path):
            filepath = os.path.join(options.path, filename)
            if os.path.isdir(filepath):
                shutil.rmtree(os.path.join(options.path, filename))
            else:
                os.remove(filepath)
python
[ "def", "clean", "(", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "options", ".", "path", ")", ":", "logger", ".", "info", "(", "'cleaning working directory: '", "+", "options", ".", "path", ")", "for", "filename", "in", "os", ".", "listdir", "(", "options", ".", "path", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "options", ".", "path", ",", "filename", ")", "if", "os", ".", "path", ".", "isdir", "(", "filepath", ")", ":", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "options", ".", "path", ",", "filename", ")", ")", "else", ":", "os", ".", "remove", "(", "filepath", ")" ]
Remove all of the files contained in workdir.options.path
[ "Remove", "all", "of", "the", "files", "contained", "in", "workdir", ".", "options", ".", "path" ]
44a62f45cefb9a1b834d23191e88340b790a553e
https://github.com/ajk8/workdir-python/blob/44a62f45cefb9a1b834d23191e88340b790a553e/workdir/__init__.py#L94-L103
245,010
ajk8/workdir-python
workdir/__init__.py
remove
def remove():
    """ Remove workdir.options.path """
    if os.path.isdir(options.path):
        logger.info('removing working directory: ' + options.path)
        shutil.rmtree(options.path)
python
[ "def", "remove", "(", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "options", ".", "path", ")", ":", "logger", ".", "info", "(", "'removing working directory: '", "+", "options", ".", "path", ")", "shutil", ".", "rmtree", "(", "options", ".", "path", ")" ]
Remove workdir.options.path
[ "Remove", "workdir", ".", "options", ".", "path" ]
44a62f45cefb9a1b834d23191e88340b790a553e
https://github.com/ajk8/workdir-python/blob/44a62f45cefb9a1b834d23191e88340b790a553e/workdir/__init__.py#L106-L110
245,011
observerss/yamo
yamo/document.py
MapperMixin.query
def query(cls, *args, **kwargs):
    """ Same as collection.find, but return Document then dict """
    for doc in cls._coll.find(*args, **kwargs):
        yield cls.from_storage(doc)
python
[ "def", "query", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "doc", "in", "cls", ".", "_coll", ".", "find", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "cls", ".", "from_storage", "(", "doc", ")" ]
Same as collection.find, but return Document then dict
[ "Same", "as", "collection", ".", "find", "but", "return", "Document", "then", "dict" ]
ef0ab1ab7be2ecbc452d55ac9b367eb4c0d88646
https://github.com/observerss/yamo/blob/ef0ab1ab7be2ecbc452d55ac9b367eb4c0d88646/yamo/document.py#L171-L174
245,012
observerss/yamo
yamo/document.py
MapperMixin.query_one
def query_one(cls, *args, **kwargs):
    """ Same as collection.find_one, but return Document then dict """
    doc = cls._coll.find_one(*args, **kwargs)
    if doc:
        return cls.from_storage(doc)
python
[ "def", "query_one", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "doc", "=", "cls", ".", "_coll", ".", "find_one", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "doc", ":", "return", "cls", ".", "from_storage", "(", "doc", ")" ]
Same as collection.find_one, but return Document then dict
[ "Same", "as", "collection", ".", "find_one", "but", "return", "Document", "then", "dict" ]
ef0ab1ab7be2ecbc452d55ac9b367eb4c0d88646
https://github.com/observerss/yamo/blob/ef0ab1ab7be2ecbc452d55ac9b367eb4c0d88646/yamo/document.py#L177-L181
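In both docstrings, "Document then dict" presumably means "Document rather than dict": the classmethods pass their arguments straight through to pymongo's find/find_one and wrap each raw document via from_storage. A call-shape sketch with a hypothetical model; the import path and the model declaration details are assumptions:

    from yamo.document import Document  # import path assumed from the record above

    class User(Document):
        ...  # field declarations omitted; connection setup assumed elsewhere

    for user in User.query({'active': True}):  # same arguments as collection.find
        print(user)

    admin = User.query_one({'name': 'admin'})  # same arguments as collection.find_one
    if admin is None:
        print('no match')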
245,013
observerss/yamo
yamo/document.py
MapperMixin.upsert
def upsert(self, null=False):
    """ Insert or Update Document

    :param null: whether update null values

    Wisely select unique field values as filter,
    Update with upsert=True
    """
    self._pre_save()
    self.validate()

    filter_ = self._upsert_filter()
    if filter_:
        update = self._upsert_update(filter_, null)
        if update['$set']:
            r = self._coll.find_one_and_update(filter_, update,
                                               upsert=True, new=True)
            self._data['_id'] = r['_id']
    else:
        r = self._coll.insert_one(self._data)
        self._data['_id'] = r.inserted_id
python
[ "def", "upsert", "(", "self", ",", "null", "=", "False", ")", ":", "self", ".", "_pre_save", "(", ")", "self", ".", "validate", "(", ")", "filter_", "=", "self", ".", "_upsert_filter", "(", ")", "if", "filter_", ":", "update", "=", "self", ".", "_upsert_update", "(", "filter_", ",", "null", ")", "if", "update", "[", "'$set'", "]", ":", "r", "=", "self", ".", "_coll", ".", "find_one_and_update", "(", "filter_", ",", "update", ",", "upsert", "=", "True", ",", "new", "=", "True", ")", "self", ".", "_data", "[", "'_id'", "]", "=", "r", "[", "'_id'", "]", "else", ":", "r", "=", "self", ".", "_coll", ".", "insert_one", "(", "self", ".", "_data", ")", "self", ".", "_data", "[", "'_id'", "]", "=", "r", ".", "inserted_id" ]
Insert or Update Document :param null: whether update null values Wisely select unique field values as filter, Update with upsert=True
[ "Insert", "or", "Update", "Document" ]
ef0ab1ab7be2ecbc452d55ac9b367eb4c0d88646
https://github.com/observerss/yamo/blob/ef0ab1ab7be2ecbc452d55ac9b367eb4c0d88646/yamo/document.py#L188-L208
245,014
delfick/aws_syncr
aws_syncr/amazon/apigateway.py
ApiGateway.load_info
def load_info(self, client, info):
    """Fill out information about the gateway"""
    if 'identity' in info:
        info['stages'] = client.get_stages(restApiId=info['identity'])['item']
        info['resources'] = client.get_resources(restApiId=info['identity'])['items']
        for resource in info['resources']:
            for method in resource.get('resourceMethods', {}):
                resource['resourceMethods'][method] = client.get_method(restApiId=info['identity'], resourceId=resource['id'], httpMethod=method)
                for status_code, options in resource['resourceMethods'][method]['methodResponses'].items():
                    options.update(client.get_method_response(restApiId=info['identity'], resourceId=resource['id'], httpMethod=method, statusCode=status_code))
        info['deployment'] = client.get_deployments(restApiId=info['identity'])['items']
    else:
        for key in ('stages', 'resources', 'deployment'):
            info[key] = []

    info['api_keys'] = client.get_api_keys()['items']
    info['domains'] = client.get_domain_names()['items']
    for domain in info['domains']:
        domain['mappings'] = client.get_base_path_mappings(domainName=domain['domainName']).get('items', [])
python
[ "def", "load_info", "(", "self", ",", "client", ",", "info", ")", ":", "if", "'identity'", "in", "info", ":", "info", "[", "'stages'", "]", "=", "client", ".", "get_stages", "(", "restApiId", "=", "info", "[", "'identity'", "]", ")", "[", "'item'", "]", "info", "[", "'resources'", "]", "=", "client", ".", "get_resources", "(", "restApiId", "=", "info", "[", "'identity'", "]", ")", "[", "'items'", "]", "for", "resource", "in", "info", "[", "'resources'", "]", ":", "for", "method", "in", "resource", ".", "get", "(", "'resourceMethods'", ",", "{", "}", ")", ":", "resource", "[", "'resourceMethods'", "]", "[", "method", "]", "=", "client", ".", "get_method", "(", "restApiId", "=", "info", "[", "'identity'", "]", ",", "resourceId", "=", "resource", "[", "'id'", "]", ",", "httpMethod", "=", "method", ")", "for", "status_code", ",", "options", "in", "resource", "[", "'resourceMethods'", "]", "[", "method", "]", "[", "'methodResponses'", "]", ".", "items", "(", ")", ":", "options", ".", "update", "(", "client", ".", "get_method_response", "(", "restApiId", "=", "info", "[", "'identity'", "]", ",", "resourceId", "=", "resource", "[", "'id'", "]", ",", "httpMethod", "=", "method", ",", "statusCode", "=", "status_code", ")", ")", "info", "[", "'deployment'", "]", "=", "client", ".", "get_deployments", "(", "restApiId", "=", "info", "[", "'identity'", "]", ")", "[", "'items'", "]", "else", ":", "for", "key", "in", "(", "'stages'", ",", "'resources'", ",", "'deployment'", ")", ":", "info", "[", "key", "]", "=", "[", "]", "info", "[", "'api_keys'", "]", "=", "client", ".", "get_api_keys", "(", ")", "[", "'items'", "]", "info", "[", "'domains'", "]", "=", "client", ".", "get_domain_names", "(", ")", "[", "'items'", "]", "for", "domain", "in", "info", "[", "'domains'", "]", ":", "domain", "[", "'mappings'", "]", "=", "client", ".", "get_base_path_mappings", "(", "domainName", "=", "domain", "[", "'domainName'", "]", ")", ".", "get", "(", "'items'", ",", "[", "]", ")" ]
Fill out information about the gateway
[ "Fill", "out", "information", "about", "the", "gateway" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/amazon/apigateway.py#L38-L57
245,015
django-py/django-doberman
doberman/__init__.py
Doberman.get_user_ip
def get_user_ip(self, request):
    """
    get the client IP address bassed on a HTTPRequest
    """
    client_ip_address = None

    # searching the IP address
    for key in self.configuration.network.ip_meta_precedence_order:
        ip_meta_value = request.META.get(key, '').strip()

        if ip_meta_value != '':
            ips = [ip.strip().lower() for ip in ip_meta_value.split(',')]

            for ip_str in ips:
                if ip_str and is_valid_ip(ip_str):
                    if not ip_str.startswith(self.configuration.network.non_public_ip_prefixes):
                        return ip_str

                    elif not self.configuration.network.real_ip_only:
                        loopback = ('127.0.0.1', '::1')

                        if client_ip_address is None:
                            client_ip_address = ip_str

                        elif client_ip_address in loopback and ip_str not in loopback:
                            client_ip_address = ip_str

    if client_ip_address is None and settings.DEBUG:
        raise DobermanImproperlyConfigured(
            "Unknown IP, maybe you are working on localhost/development, "
            "so please set in your setting: DOBERMAN_REAL_IP_ONLY=False"
        )

    return client_ip_address
python
[ "def", "get_user_ip", "(", "self", ",", "request", ")", ":", "client_ip_address", "=", "None", "# searching the IP address", "for", "key", "in", "self", ".", "configuration", ".", "network", ".", "ip_meta_precedence_order", ":", "ip_meta_value", "=", "request", ".", "META", ".", "get", "(", "key", ",", "''", ")", ".", "strip", "(", ")", "if", "ip_meta_value", "!=", "''", ":", "ips", "=", "[", "ip", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "ip", "in", "ip_meta_value", ".", "split", "(", "','", ")", "]", "for", "ip_str", "in", "ips", ":", "if", "ip_str", "and", "is_valid_ip", "(", "ip_str", ")", ":", "if", "not", "ip_str", ".", "startswith", "(", "self", ".", "configuration", ".", "network", ".", "non_public_ip_prefixes", ")", ":", "return", "ip_str", "elif", "not", "self", ".", "configuration", ".", "network", ".", "real_ip_only", ":", "loopback", "=", "(", "'127.0.0.1'", ",", "'::1'", ")", "if", "client_ip_address", "is", "None", ":", "client_ip_address", "=", "ip_str", "elif", "client_ip_address", "in", "loopback", "and", "ip_str", "not", "in", "loopback", ":", "client_ip_address", "=", "ip_str", "if", "client_ip_address", "is", "None", "and", "settings", ".", "DEBUG", ":", "raise", "DobermanImproperlyConfigured", "(", "\"Unknown IP, maybe you are working on localhost/development, \"", "\"so please set in your setting: DOBERMAN_REAL_IP_ONLY=False\"", ")", "return", "client_ip_address" ]
get the client IP address bassed on a HTTPRequest
[ "get", "the", "client", "IP", "address", "bassed", "on", "a", "HTTPRequest" ]
2e5959737a1b64234ed5a179c93f96a0de1c3e5c
https://github.com/django-py/django-doberman/blob/2e5959737a1b64234ed5a179c93f96a0de1c3e5c/doberman/__init__.py#L63-L100
245,016
radjkarl/fancyTools
fancytools/os/PathStr.py
PathStr.raw
def raw(self):
    """Try to transform str to raw str"
    ... this will not work every time
    """
    escape_dict = {'\a': r'\a',
                   '\b': r'\b',
                   '\c': r'\c',
                   '\f': r'\f',
                   '\n': r'\n',
                   '\r': r'\r',
                   '\t': r'\t',
                   '\v': r'\v',
                   #'\x':r'\x',#cannot do \x - otherwise exception
                   '\'': r'\'',
                   '\"': r'\"',
                   #'\0':r'\0', #doesnt work
                   '\1': r'\1',
                   '\2': r'\2',
                   '\3': r'\3',
                   '\4': r'\4',
                   '\5': r'\5',
                   '\6': r'\6',
                   #'\7':r'\7',#same as \a is ASCI
                   }
    new_string = ''
    for char in self:
        try:
            new_string += escape_dict[char]
        except KeyError:
            new_string += char
    return new_string
python
[ "def", "raw", "(", "self", ")", ":", "escape_dict", "=", "{", "'\\a'", ":", "r'\\a'", ",", "'\\b'", ":", "r'\\b'", ",", "'\\c'", ":", "r'\\c'", ",", "'\\f'", ":", "r'\\f'", ",", "'\\n'", ":", "r'\\n'", ",", "'\\r'", ":", "r'\\r'", ",", "'\\t'", ":", "r'\\t'", ",", "'\\v'", ":", "r'\\v'", ",", "#'\\x':r'\\x',#cannot do \\x - otherwise exception", "'\\''", ":", "r'\\''", ",", "'\\\"'", ":", "r'\\\"'", ",", "#'\\0':r'\\0', #doesnt work", "'\\1'", ":", "r'\\1'", ",", "'\\2'", ":", "r'\\2'", ",", "'\\3'", ":", "r'\\3'", ",", "'\\4'", ":", "r'\\4'", ",", "'\\5'", ":", "r'\\5'", ",", "'\\6'", ":", "r'\\6'", ",", "#'\\7':r'\\7',#same as \\a is ASCI", "}", "new_string", "=", "''", "for", "char", "in", "self", ":", "try", ":", "new_string", "+=", "escape_dict", "[", "char", "]", "except", "KeyError", ":", "new_string", "+=", "char", "return", "new_string" ]
Try to transform str to raw str" ... this will not work every time
[ "Try", "to", "transform", "str", "to", "raw", "str", "...", "this", "will", "not", "work", "every", "time" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/os/PathStr.py#L33-L64
245,017
radjkarl/fancyTools
fancytools/os/PathStr.py
PathStr.load
def load(self, size):
    """open and read the file is existent"""
    if self.exists() and self.isfile():
        return eval(open(self).read(size))
python
[ "def", "load", "(", "self", ",", "size", ")", ":", "if", "self", ".", "exists", "(", ")", "and", "self", ".", "isfile", "(", ")", ":", "return", "eval", "(", "open", "(", "self", ")", ".", "read", "(", "size", ")", ")" ]
open and read the file is existent
[ "open", "and", "read", "the", "file", "is", "existent" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/os/PathStr.py#L101-L104
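load() above evaluates raw file text with eval, which will execute arbitrary expressions found in the file. When the file is expected to hold a plain Python literal, a safer pattern is ast.literal_eval; this is a general alternative, not part of PathStr's API:

    import ast

    def load_literal(path, size=-1):
        """Parse a Python literal (dict, list, str, number, ...) from a file.

        Unlike eval, ast.literal_eval never executes code.
        """
        with open(path) as f:
            return ast.literal_eval(f.read(size))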
245,018
radjkarl/fancyTools
fancytools/os/PathStr.py
PathStr.files
def files(self, ftype=None):
    """
    return a first of path to all files within that folder
    """
    a = [self.join(i) for i in self]
    if ftype is not None:
        return [i for i in a
                if i.isfile() and i.filetype() == ftype]
    return [i for i in a if i.isfile()]
python
[ "def", "files", "(", "self", ",", "ftype", "=", "None", ")", ":", "a", "=", "[", "self", ".", "join", "(", "i", ")", "for", "i", "in", "self", "]", "if", "ftype", "is", "not", "None", ":", "return", "[", "i", "for", "i", "in", "a", "if", "i", ".", "isfile", "(", ")", "and", "i", ".", "filetype", "(", ")", "==", "ftype", "]", "return", "[", "i", "for", "i", "in", "a", "if", "i", ".", "isfile", "(", ")", "]" ]
return a first of path to all files within that folder
[ "return", "a", "first", "of", "path", "to", "all", "files", "within", "that", "folder" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/os/PathStr.py#L181-L188
245,019
Nixiware/viper
nx/viper/config.py
Config.mergeDictionaries
def mergeDictionaries(sourceDictionary, destinationDictionary):
    """
    Deep merge dictionaries recursively.

    :param sourceDictionary: <dict> first dictionary with data
    :param destinationDictionary: <dict> second dictionary with data
    :return: <dict> merged dictionary
    """
    log = Logger()
    varNamePattern = re.compile(r"^((__((ENV)|(FILE))__[A-Z]{3,})|(__((ENV)|(FILE))))__(?P<name>.*)$")
    varTypePattern = re.compile(r"^__((ENV)|(FILE))__(?P<type>[A-Z]{3,})__(.*)$")

    for key, value in sourceDictionary.items():
        # ignoring comments
        if key == "//":
            continue

        if isinstance(value, dict):
            # get node or create one
            node = destinationDictionary.setdefault(key, {})
            Config.mergeDictionaries(value, node)
        elif isinstance(value, str) and (value.startswith("__ENV__") or value.startswith("__FILE__")):
            # extracting environment variable name
            nameMatch = varNamePattern.match(value)
            if nameMatch is None:
                log.warn("Invalid environmental variable specified: {name}", name=value)
                continue
            envVariableName = nameMatch.group("name")

            # checking if environment variable is set
            if envVariableName not in os.environ:
                log.warn("No environment variable {name} is set.", name=envVariableName)
                continue

            if value.startswith("__ENV__"):
                # checking if value is set in the environment variable
                # checking if variable has a defined cast type
                typeMatch = varTypePattern.match(value)
                if typeMatch is not None:
                    envVariableType = typeMatch.group("type")

                    # casting value to the specified type
                    if envVariableType == "STR":
                        destinationDictionary[key] = str(os.environ[envVariableName])
                    elif envVariableType == "BOOL":
                        if os.environ[envVariableName] == "1":
                            destinationDictionary[key] = True
                        elif os.environ[envVariableName] == "0":
                            destinationDictionary[key] = False
                    elif envVariableType == "INT":
                        destinationDictionary[key] = int(os.environ[envVariableName])
                    elif envVariableType == "FLOAT":
                        destinationDictionary[key] = float(os.environ[envVariableName])
                    elif envVariableType == "JSON":
                        try:
                            destinationDictionary[key] = json.loads(os.environ[envVariableName])
                        except Exception:
                            log.warn(
                                "Environment variable {name} contains an invalid JSON value.",
                                name=envVariableName
                            )
                    else:
                        log.warn(
                            "Unsupported type {type} specified for variable {name}.",
                            name=envVariableName, type=envVariableType
                        )
                        continue
                else:
                    destinationDictionary[key] = os.environ[envVariableName]
            elif value.startswith("__FILE__"):
                # checking if value is set in a file
                filePath = os.environ[envVariableName]

                # checking if file exists
                if not os.path.isfile(filePath):
                    log.warn(
                        "File {filePath} does not exist.",
                        filePath=filePath,
                    )
                    continue

                # checking if file can be read
                if not os.access(filePath, os.R_OK):
                    log.warn(
                        "File {filePath} cannot be read.",
                        filePath=filePath,
                    )
                    continue

                # load file contents
                filePointer = open(filePath, "r")
                destinationDictionary[key] = filePointer.read().strip()
                filePointer.close()
        elif isinstance(value, str) and value.startswith("__FILE__"):
            pass
        else:
            destinationDictionary[key] = value

    return destinationDictionary
python
[ "def", "mergeDictionaries", "(", "sourceDictionary", ",", "destinationDictionary", ")", ":", "log", "=", "Logger", "(", ")", "varNamePattern", "=", "re", ".", "compile", "(", "r\"^((__((ENV)|(FILE))__[A-Z]{3,})|(__((ENV)|(FILE))))__(?P<name>.*)$\"", ")", "varTypePattern", "=", "re", ".", "compile", "(", "r\"^__((ENV)|(FILE))__(?P<type>[A-Z]{3,})__(.*)$\"", ")", "for", "key", ",", "value", "in", "sourceDictionary", ".", "items", "(", ")", ":", "# ignoring comments", "if", "key", "==", "\"//\"", ":", "continue", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "# get node or create one", "node", "=", "destinationDictionary", ".", "setdefault", "(", "key", ",", "{", "}", ")", "Config", ".", "mergeDictionaries", "(", "value", ",", "node", ")", "elif", "isinstance", "(", "value", ",", "str", ")", "and", "(", "value", ".", "startswith", "(", "\"__ENV__\"", ")", "or", "value", ".", "startswith", "(", "\"__FILE__\"", ")", ")", ":", "# extracting environment variable name", "nameMatch", "=", "varNamePattern", ".", "match", "(", "value", ")", "if", "nameMatch", "is", "None", ":", "log", ".", "warn", "(", "\"Invalid environmental variable specified: {name}\"", ",", "name", "=", "value", ")", "continue", "envVariableName", "=", "nameMatch", ".", "group", "(", "\"name\"", ")", "# checking if environment variable is set", "if", "envVariableName", "not", "in", "os", ".", "environ", ":", "log", ".", "warn", "(", "\"No environment variable {name} is set.\"", ",", "name", "=", "envVariableName", ")", "continue", "if", "value", ".", "startswith", "(", "\"__ENV__\"", ")", ":", "# checking if value is set in the environment variable", "# checking if variable has a defined cast type", "typeMatch", "=", "varTypePattern", ".", "match", "(", "value", ")", "if", "typeMatch", "is", "not", "None", ":", "envVariableType", "=", "typeMatch", ".", "group", "(", "\"type\"", ")", "# casting value to the specified type", "if", "envVariableType", "==", "\"STR\"", ":", "destinationDictionary", "[", "key", "]", "=", "str", "(", "os", ".", "environ", "[", "envVariableName", "]", ")", "elif", "envVariableType", "==", "\"BOOL\"", ":", "if", "os", ".", "environ", "[", "envVariableName", "]", "==", "\"1\"", ":", "destinationDictionary", "[", "key", "]", "=", "True", "elif", "os", ".", "environ", "[", "envVariableName", "]", "==", "\"0\"", ":", "destinationDictionary", "[", "key", "]", "=", "False", "elif", "envVariableType", "==", "\"INT\"", ":", "destinationDictionary", "[", "key", "]", "=", "int", "(", "os", ".", "environ", "[", "envVariableName", "]", ")", "elif", "envVariableType", "==", "\"FLOAT\"", ":", "destinationDictionary", "[", "key", "]", "=", "float", "(", "os", ".", "environ", "[", "envVariableName", "]", ")", "elif", "envVariableType", "==", "\"JSON\"", ":", "try", ":", "destinationDictionary", "[", "key", "]", "=", "json", ".", "loads", "(", "os", ".", "environ", "[", "envVariableName", "]", ")", "except", "Exception", ":", "log", ".", "warn", "(", "\"Environment variable {name} contains an invalid JSON value.\"", ",", "name", "=", "envVariableName", ")", "else", ":", "log", ".", "warn", "(", "\"Unsupported type {type} specified for variable {name}.\"", ",", "name", "=", "envVariableName", ",", "type", "=", "envVariableType", ")", "continue", "else", ":", "destinationDictionary", "[", "key", "]", "=", "os", ".", "environ", "[", "envVariableName", "]", "elif", "value", ".", "startswith", "(", "\"__FILE__\"", ")", ":", "# checking if value is set in a file", "filePath", "=", "os", ".", "environ", "[", 
"envVariableName", "]", "# checking if file exists", "if", "not", "os", ".", "path", ".", "isfile", "(", "filePath", ")", ":", "log", ".", "warn", "(", "\"File {filePath} does not exist.\"", ",", "filePath", "=", "filePath", ",", ")", "continue", "# checking if file can be read", "if", "not", "os", ".", "access", "(", "filePath", ",", "os", ".", "R_OK", ")", ":", "log", ".", "warn", "(", "\"File {filePath} cannot be read.\"", ",", "filePath", "=", "filePath", ",", ")", "continue", "# load file contents", "filePointer", "=", "open", "(", "filePath", ",", "\"r\"", ")", "destinationDictionary", "[", "key", "]", "=", "filePointer", ".", "read", "(", ")", ".", "strip", "(", ")", "filePointer", ".", "close", "(", ")", "elif", "isinstance", "(", "value", ",", "str", ")", "and", "value", ".", "startswith", "(", "\"__FILE__\"", ")", ":", "pass", "else", ":", "destinationDictionary", "[", "key", "]", "=", "value", "return", "destinationDictionary" ]
Deep merge dictionaries recursively. :param sourceDictionary: <dict> first dictionary with data :param destinationDictionary: <dict> second dictionary with data :return: <dict> merged dictionary
[ "Deep", "merge", "dictionaries", "recursively", "." ]
fbe6057facd8d46103e9955880dfd99e63b7acb3
https://github.com/Nixiware/viper/blob/fbe6057facd8d46103e9955880dfd99e63b7acb3/nx/viper/config.py#L70-L167
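A small sketch of the value convention the merger resolves. The import path follows the record above, and the environment variable and key names are hypothetical:

    import os
    from nx.viper.config import Config  # import path assumed from the record above

    os.environ['APP_PORT'] = '8080'
    os.environ['APP_DEBUG'] = '1'

    source = {
        "//": "comment keys like this one are skipped",
        "server": {
            "port": "__ENV__INT__APP_PORT",     # cast to int from $APP_PORT
            "debug": "__ENV__BOOL__APP_DEBUG",  # '1'/'0' become True/False
            "host": "0.0.0.0",                  # plain values are copied through
        },
    }

    merged = Config.mergeDictionaries(source, {})
    # merged == {'server': {'port': 8080, 'debug': True, 'host': '0.0.0.0'}}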
245,020
Saledddar/pyunet
pyunet/util.py
add_to_path
def add_to_path(p):
    '''
        Adds a path to python paths and removes it after the 'with' block ends
    '''
    old_path = sys.path
    if p not in sys.path:
        sys.path = sys.path[:]
        sys.path.insert(0, p)
    try:
        yield
    finally:
        sys.path = old_path
python
[ "def", "add_to_path", "(", "p", ")", ":", "old_path", "=", "sys", ".", "path", "if", "p", "not", "in", "sys", ".", "path", ":", "sys", ".", "path", "=", "sys", ".", "path", "[", ":", "]", "sys", ".", "path", ".", "insert", "(", "0", ",", "p", ")", "try", ":", "yield", "finally", ":", "sys", ".", "path", "=", "old_path" ]
Adds a path to python paths and removes it after the 'with' block ends
[ "Adds", "a", "path", "to", "python", "paths", "and", "removes", "it", "after", "the", "with", "block", "ends" ]
ca5ccc32588fae8da43f968e7747d3f3da509507
https://github.com/Saledddar/pyunet/blob/ca5ccc32588fae8da43f968e7747d3f3da509507/pyunet/util.py#L20-L31
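add_to_path also yields, so it is presumably exposed as a context manager (via contextlib.contextmanager) in pyunet.util. A usage sketch with a hypothetical directory; the import path is assumed from the record above:

    import sys
    from pyunet.util import add_to_path  # import path assumed

    with add_to_path('/opt/my_plugins'):
        # '/opt/my_plugins' heads sys.path only inside this block
        assert sys.path[0] == '/opt/my_plugins'
    assert '/opt/my_plugins' not in sys.path  # the old sys.path object is restored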
245,021
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/wsdl2dispatch.py
ServiceModuleWriter.setUpImports
def setUpImports(self):
    '''set import statements
    '''
    i = self.imports
    print >>i, 'from pyremotevbox.ZSI.schema import GED, GTD'
    print >>i, 'from pyremotevbox.ZSI.TCcompound import ComplexType, Struct'

    module = self.getTypesModuleName()
    package = self.getTypesModulePath()
    if package:
        module = '%s.%s' %(package, module)

    print >>i, 'from %s import *' %(module)
    print >>i, 'from %s import %s' %(self.base_module_name, self.base_class_name)
python
[ "def", "setUpImports", "(", "self", ")", ":", "i", "=", "self", ".", "imports", "print", ">>", "i", ",", "'from pyremotevbox.ZSI.schema import GED, GTD'", "print", ">>", "i", ",", "'from pyremotevbox.ZSI.TCcompound import ComplexType, Struct'", "module", "=", "self", ".", "getTypesModuleName", "(", ")", "package", "=", "self", ".", "getTypesModulePath", "(", ")", "if", "package", ":", "module", "=", "'%s.%s'", "%", "(", "package", ",", "module", ")", "print", ">>", "i", ",", "'from %s import *'", "%", "(", "module", ")", "print", ">>", "i", ",", "'from %s import %s'", "%", "(", "self", ".", "base_module_name", ",", "self", ".", "base_class_name", ")" ]
set import statements
[ "set", "import", "statements" ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2dispatch.py#L143-L157
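The ZSI generator code in these records is Python 2: `print >>stream, ...` writes to a file-like object, and `raise X, 'msg'` / `except E, ex` are the old comma forms. For readers on Python 3, the equivalent of the stream-print idiom used throughout is:

    import io

    buf = io.StringIO()
    # Python 3 spelling of: print >>buf, 'from %s import *' % module
    print('from %s import *' % 'mypackage.types', file=buf)
    print(buf.getvalue(), end='')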
245,022
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/wsdl2dispatch.py
ServiceModuleWriter.write
def write(self, fd=sys.stdout):
    '''write out to file descriptor, should not need to override.
    '''
    print >>fd, self.header.getvalue()
    print >>fd, self.imports.getvalue()

    print >>fd, '# Messages ',
    for m in self.messages:
        print >>fd, m

    print >>fd, ''
    print >>fd, ''
    print >>fd, '# Service Skeletons'
    for k,v in self._services.items():
        print >>fd, v.classdef.getvalue()
        print >>fd, v.initdef.getvalue()
        for s in v.methods:
            print >>fd, s.getvalue()
python
[ "def", "write", "(", "self", ",", "fd", "=", "sys", ".", "stdout", ")", ":", "print", ">>", "fd", ",", "self", ".", "header", ".", "getvalue", "(", ")", "print", ">>", "fd", ",", "self", ".", "imports", ".", "getvalue", "(", ")", "print", ">>", "fd", ",", "'# Messages '", ",", "for", "m", "in", "self", ".", "messages", ":", "print", ">>", "fd", ",", "m", "print", ">>", "fd", ",", "''", "print", ">>", "fd", ",", "''", "print", ">>", "fd", ",", "'# Service Skeletons'", "for", "k", ",", "v", "in", "self", ".", "_services", ".", "items", "(", ")", ":", "print", ">>", "fd", ",", "v", ".", "classdef", ".", "getvalue", "(", ")", "print", ">>", "fd", ",", "v", ".", "initdef", ".", "getvalue", "(", ")", "for", "s", "in", "v", ".", "methods", ":", "print", ">>", "fd", ",", "s", ".", "getvalue", "(", ")" ]
write out to file descriptor, should not need to override.
[ "write", "out", "to", "file", "descriptor", "should", "not", "need", "to", "override", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2dispatch.py#L254-L272
245,023
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/wsdl2dispatch.py
ServiceModuleWriter.fromWSDL
def fromWSDL(self, wsdl):
    '''setup the service description from WSDL, should not need to override.
    '''
    assert isinstance(wsdl, WSDLTools.WSDL), 'expecting WSDL instance'

    if len(wsdl.services) == 0:
        raise WsdlGeneratorError, 'No service defined'

    self.reset()
    self.wsdl = wsdl
    self.setUpHeader()
    self.setUpImports()
    for service in wsdl.services:
        sd = self._service_class(service.name)
        self._services[service.name] = sd

        for port in service.ports:
            desc = BindingDescription(wsdl=wsdl)
            try:
                desc.setUp(port.getBinding())
            except Wsdl2PythonError, ex:
                continue

            for soc in desc.operations:
                if not soc.hasInput():
                    continue

                self.messages.append(MessageWriter())
                self.messages[-1].setUp(soc, port, input=True)
                if soc.hasOutput():
                    self.messages.append(MessageWriter())
                    self.messages[-1].setUp(soc, port, input=False)

            for e in port.extensions:
                if isinstance(e, WSDLTools.SoapAddressBinding):
                    sd.location = e.location

            self.setUpMethods(port)

        self.setUpClassDef(service)
        self.setUpInitDef(service)
python
[ "def", "fromWSDL", "(", "self", ",", "wsdl", ")", ":", "assert", "isinstance", "(", "wsdl", ",", "WSDLTools", ".", "WSDL", ")", ",", "'expecting WSDL instance'", "if", "len", "(", "wsdl", ".", "services", ")", "==", "0", ":", "raise", "WsdlGeneratorError", ",", "'No service defined'", "self", ".", "reset", "(", ")", "self", ".", "wsdl", "=", "wsdl", "self", ".", "setUpHeader", "(", ")", "self", ".", "setUpImports", "(", ")", "for", "service", "in", "wsdl", ".", "services", ":", "sd", "=", "self", ".", "_service_class", "(", "service", ".", "name", ")", "self", ".", "_services", "[", "service", ".", "name", "]", "=", "sd", "for", "port", "in", "service", ".", "ports", ":", "desc", "=", "BindingDescription", "(", "wsdl", "=", "wsdl", ")", "try", ":", "desc", ".", "setUp", "(", "port", ".", "getBinding", "(", ")", ")", "except", "Wsdl2PythonError", ",", "ex", ":", "continue", "for", "soc", "in", "desc", ".", "operations", ":", "if", "not", "soc", ".", "hasInput", "(", ")", ":", "continue", "self", ".", "messages", ".", "append", "(", "MessageWriter", "(", ")", ")", "self", ".", "messages", "[", "-", "1", "]", ".", "setUp", "(", "soc", ",", "port", ",", "input", "=", "True", ")", "if", "soc", ".", "hasOutput", "(", ")", ":", "self", ".", "messages", ".", "append", "(", "MessageWriter", "(", ")", ")", "self", ".", "messages", "[", "-", "1", "]", ".", "setUp", "(", "soc", ",", "port", ",", "input", "=", "False", ")", "for", "e", "in", "port", ".", "extensions", ":", "if", "isinstance", "(", "e", ",", "WSDLTools", ".", "SoapAddressBinding", ")", ":", "sd", ".", "location", "=", "e", ".", "location", "self", ".", "setUpMethods", "(", "port", ")", "self", ".", "setUpClassDef", "(", "service", ")", "self", ".", "setUpInitDef", "(", "service", ")" ]
setup the service description from WSDL, should not need to override.
[ "setup", "the", "service", "description", "from", "WSDL", "should", "not", "need", "to", "override", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2dispatch.py#L274-L315
245,024
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/wsdl2dispatch.py
WSAServiceModuleWriter.setUpClassDef
def setUpClassDef(self, service):
    '''use soapAction dict for WS-Action input, setup wsAction
    dict for grabbing WS-Action output values.
    '''
    assert isinstance(service, WSDLTools.Service), \
        'expecting WSDLTools.Service instance'

    s = self._services[service.name].classdef
    print >>s, 'class %s(%s):' %(self.getClassName(service.name), self.base_class_name)
    print >>s, '%ssoapAction = {}' % self.getIndent(level=1)
    print >>s, '%swsAction = {}' % self.getIndent(level=1)
    print >>s, '%sroot = {}' % self.getIndent(level=1)
python
[ "def", "setUpClassDef", "(", "self", ",", "service", ")", ":", "assert", "isinstance", "(", "service", ",", "WSDLTools", ".", "Service", ")", ",", "'expecting WSDLTools.Service instance'", "s", "=", "self", ".", "_services", "[", "service", ".", "name", "]", ".", "classdef", "print", ">>", "s", ",", "'class %s(%s):'", "%", "(", "self", ".", "getClassName", "(", "service", ".", "name", ")", ",", "self", ".", "base_class_name", ")", "print", ">>", "s", ",", "'%ssoapAction = {}'", "%", "self", ".", "getIndent", "(", "level", "=", "1", ")", "print", ">>", "s", ",", "'%swsAction = {}'", "%", "self", ".", "getIndent", "(", "level", "=", "1", ")", "print", ">>", "s", ",", "'%sroot = {}'", "%", "self", ".", "getIndent", "(", "level", "=", "1", ")" ]
use soapAction dict for WS-Action input, setup wsAction dict for grabbing WS-Action output values.
[ "use", "soapAction", "dict", "for", "WS", "-", "Action", "input", "setup", "wsAction", "dict", "for", "grabbing", "WS", "-", "Action", "output", "values", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2dispatch.py#L347-L358
245,025
TC01/calcpkg
calcrepo/repos/__init__.py
createRepoObjects
def createRepoObjects():
    """Imports each 'plugin' in this package and creates a repo file from it"""
    repositories = {}
    repodir = os.path.join(getScriptLocation())
    for importer, name, ispkg in pkgutil.iter_modules([repodir]):
        module = importer.find_module(name).load_module(name)
        repo_name = module.name
        if module.enabled:
            repositories[repo_name] = module.getRepository()
    return repositories
python
[ "def", "createRepoObjects", "(", ")", ":", "repositories", "=", "{", "}", "repodir", "=", "os", ".", "path", ".", "join", "(", "getScriptLocation", "(", ")", ")", "for", "importer", ",", "name", ",", "ispkg", "in", "pkgutil", ".", "iter_modules", "(", "[", "repodir", "]", ")", ":", "module", "=", "importer", ".", "find_module", "(", "name", ")", ".", "load_module", "(", "name", ")", "repo_name", "=", "module", ".", "name", "if", "module", ".", "enabled", ":", "repositories", "[", "repo_name", "]", "=", "module", ".", "getRepository", "(", ")", "return", "repositories" ]
Imports each 'plugin' in this package and creates a repo file from it
[ "Imports", "each", "plugin", "in", "this", "package", "and", "creates", "a", "repo", "file", "from", "it" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repos/__init__.py#L7-L17
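For readers unfamiliar with pkgutil-based plugin discovery, here is a minimal standalone sketch of the pattern this record uses. The plugin directory and the name/enabled/getRepository() attributes are assumptions carried over from the loop above, not a documented calcpkg API.

import pkgutil

def create_repo_objects(plugin_dir):
    # Scan plugin_dir for importable modules and load each one.
    repositories = {}
    for importer, name, ispkg in pkgutil.iter_modules([plugin_dir]):
        module = importer.find_module(name).load_module(name)
        # Each plugin is expected to expose `name`, `enabled` and
        # `getRepository()`, exactly as the record above assumes.
        if module.enabled:
            repositories[module.name] = module.getRepository()
    return repositories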
245,026
sliem/barrett
barrett/posterior.py
twoD.credibleregions
def credibleregions(self, probs):
    """ Calculates the credible regions.
    """
    return [brentq(lambda l: self.pdf[self.pdf > l].sum() - p, 0.0, 1.0)
            for p in probs]
python
[ "def", "credibleregions", "(", "self", ",", "probs", ")", ":", "return", "[", "brentq", "(", "lambda", "l", ":", "self", ".", "pdf", "[", "self", ".", "pdf", ">", "l", "]", ".", "sum", "(", ")", "-", "p", ",", "0.0", ",", "1.0", ")", "for", "p", "in", "probs", "]" ]
Calculates the credible regions.
[ "Calculates", "the", "credible", "regions", "." ]
d48e96591577d1fcecd50c21a9be71573218cde7
https://github.com/sliem/barrett/blob/d48e96591577d1fcecd50c21a9be71573218cde7/barrett/posterior.py#L134-L138
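The brentq call above works because the enclosed mass pdf[pdf > l].sum() falls monotonically from 1 (at l = 0) to 0 (at l = 1), so each target probability brackets a sign change. A runnable sketch with a toy grid, assuming numpy and scipy are installed:

import numpy as np
from scipy.optimize import brentq

# A toy normalised probability grid (all cells sum to 1).
pdf = np.random.dirichlet(np.ones(100)).reshape(10, 10)

def credible_level(prob):
    # Root of f(l) = mass above threshold l minus the target probability;
    # f(0) = 1 - prob > 0 and f(1) = -prob < 0, so a root is bracketed.
    return brentq(lambda l: pdf[pdf > l].sum() - prob, 0.0, 1.0)

for p in (0.68, 0.95):
    print(p, credible_level(p))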
245,027
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/decoders/parser_yaml.py
decode
def decode(data):
    """
    Handles decoding of the YAML `data`.

    Args:
        data (str): Data which will be decoded.

    Returns:
        dict: Dictionary with decoded data.
    """
    decoded = None
    try:
        decoded = yaml.load(data)
    except Exception, e:
        e = e.message if e.message else str(e)
        raise MetaParsingException("Can't parse your YAML data: %s" % e)

    decoded = validator.check_structure(decoded)

    return decoded
python
[ "def", "decode", "(", "data", ")", ":", "decoded", "=", "None", "try", ":", "decoded", "=", "yaml", ".", "load", "(", "data", ")", "except", "Exception", ",", "e", ":", "e", "=", "e", ".", "message", "if", "e", ".", "message", "else", "str", "(", "e", ")", "raise", "MetaParsingException", "(", "\"Can't parse your YAML data: %s\"", "%", "e", ")", "decoded", "=", "validator", ".", "check_structure", "(", "decoded", ")", "return", "decoded" ]
Handles decoding of the YAML `data`. Args: data (str): Data which will be decoded. Returns: dict: Dictionary with decoded data.
[ "Handles", "decoding", "of", "the", "YAML", "data", "." ]
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/decoders/parser_yaml.py#L32-L51
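A minimal sketch of the same guarded-decode pattern in Python 3, assuming PyYAML is installed; safe_load stands in for the bare yaml.load above, and the validator step is omitted:

import yaml  # PyYAML

class MetaParsingException(Exception):
    pass

def decode(data):
    try:
        return yaml.safe_load(data)
    except Exception as exc:
        # Wrap any parser error in the domain-specific exception.
        raise MetaParsingException("Can't parse your YAML data: %s" % exc)

print(decode("author: someone\nprice: 12"))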
245,028
ch3pjw/junction
jcn/terminal.py
_override_sugar
def _override_sugar(func):
    '''Use this decorator to override an attribute that is specified in
    blessings' sugar dict with your own function that adds some additional
    functionality.
    '''
    attr_name = func.__name__

    @property
    @wraps(func)
    def func_that_uses_terminal_sugar(self):
        func(self)
        return self.__getattr__(attr_name)

    return func_that_uses_terminal_sugar
python
[ "def", "_override_sugar", "(", "func", ")", ":", "attr_name", "=", "func", ".", "__name__", "@", "property", "@", "wraps", "(", "func", ")", "def", "func_that_uses_terminal_sugar", "(", "self", ")", ":", "func", "(", "self", ")", "return", "self", ".", "__getattr__", "(", "attr_name", ")", "return", "func_that_uses_terminal_sugar" ]
Use this decorator to override an attribute that is specified in blessings' sugar dict with your own function that adds some additional functionality.
[ "Use", "this", "decorator", "to", "override", "an", "attribute", "that", "is", "specified", "in", "blessings", "sugar", "dict", "with", "your", "own", "function", "that", "adds", "some", "additional", "functionality", "." ]
7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e
https://github.com/ch3pjw/junction/blob/7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e/jcn/terminal.py#L27-L38
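A self-contained sketch of the wrapping idea: run the decorated method for its side effect, then fall through to the normal attribute lookup. The _sugar dict here stands in for blessings' sugar and is an assumption of this example.

from functools import wraps

def override_sugar(func):
    attr_name = func.__name__

    @property
    @wraps(func)
    def wrapper(self):
        func(self)                     # extra behaviour first
        return self._sugar[attr_name]  # then the original lookup

    return wrapper

class Terminal:
    _sugar = {'bold': '\x1b[1m'}
    calls = 0

    @override_sugar
    def bold(self):
        type(self).calls += 1

term = Terminal()
print(repr(term.bold), Terminal.calls)  # '\x1b[1m' 1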
245,029
ch3pjw/junction
jcn/terminal.py
Terminal.unbuffered_input
def unbuffered_input(self):
    '''Context manager for setting the terminal to use unbuffered input.

    Normally, your terminal will collect together a user's input
    keystrokes and deliver them to you in one neat parcel when they hit
    the return/enter key. In a real-time interactive application we
    instead want to receive each keystroke as it happens.

    This context manager achieves that by setting 'cbreak' mode on the
    output tty stream. cbreak is a mode inbetween 'cooked mode', where all
    the user's input is preprocessed, and 'raw mode' where none of it is.
    Basically, in cbreak mode input like :kbd:`Control-c` will still
    interrupt (i.e. 'break') the process, hence the name. Wikipedia is
    your friend on this one!

    :meth:`Root.run` uses this context manager for you to make your
    application work in the correct way.
    '''
    if self.is_a_tty:
        orig_tty_attrs = termios.tcgetattr(self.stream)
        tty.setcbreak(self.stream)
        try:
            yield
        finally:
            termios.tcsetattr(
                self.stream, termios.TCSADRAIN, orig_tty_attrs)
    else:
        yield
python
[ "def", "unbuffered_input", "(", "self", ")", ":", "if", "self", ".", "is_a_tty", ":", "orig_tty_attrs", "=", "termios", ".", "tcgetattr", "(", "self", ".", "stream", ")", "tty", ".", "setcbreak", "(", "self", ".", "stream", ")", "try", ":", "yield", "finally", ":", "termios", ".", "tcsetattr", "(", "self", ".", "stream", ",", "termios", ".", "TCSADRAIN", ",", "orig_tty_attrs", ")", "else", ":", "yield" ]
Context manager for setting the terminal to use unbuffered input. Normally, your terminal will collect together a user's input keystrokes and deliver them to you in one neat parcel when they hit the return/enter key. In a real-time interactive application we instead want to receive each keystroke as it happens. This context manager achieves that by setting 'cbreak' mode on the output tty stream. cbreak is a mode inbetween 'cooked mode', where all the user's input is preprocessed, and 'raw mode' where none of it is. Basically, in cbreak mode input like :kbd:`Control-c` will still interrupt (i.e. 'break') the process, hence the name. Wikipedia is your friend on this one! :meth:`Root.run` uses this context manager for you to make your application work in the correct way.
[ "Context", "manager", "for", "setting", "the", "terminal", "to", "use", "unbuffered", "input", "." ]
7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e
https://github.com/ch3pjw/junction/blob/7d0c4d279589bee8ae7b3ac4dee2ab425c0b1b0e/jcn/terminal.py#L156-L183
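The same cbreak dance can be written as a standalone context manager with only the standard library (POSIX terminals only). Reading from stdin here, rather than the object's own stream, is a simplification of this sketch:

import contextlib
import sys
import termios
import tty

@contextlib.contextmanager
def cbreak(stream=sys.stdin):
    if stream.isatty():
        old_attrs = termios.tcgetattr(stream)
        tty.setcbreak(stream)
        try:
            yield
        finally:
            termios.tcsetattr(stream, termios.TCSADRAIN, old_attrs)
    else:
        yield  # not a tty: nothing to set or restore

with cbreak():
    key = sys.stdin.read(1)  # delivered per keystroke, no Enter needed
print('got %r' % key)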
245,030
SkyLothar/shcmd
shcmd/proc.py
Proc.raise_for_error
def raise_for_error(self):
    """
    raise `ShCmdError` if the proc's return_code is not 0
    otherwise return self

    ..Usage::

        >>> proc = shcmd.run("ls").raise_for_error()
        >>> proc.return_code == 0
        True
    """
    if self.ok:
        return self
    tip = "running {0} @<{1}> error, return code {2}".format(
        " ".join(self.cmd), self.cwd, self.return_code
    )
    logger.error("{0}\nstdout:{1}\nstderr:{2}\n".format(
        tip, self._stdout.decode("utf8"), self._stderr.decode("utf8")
    ))
    raise ShCmdError(self)
python
[ "def", "raise_for_error", "(", "self", ")", ":", "if", "self", ".", "ok", ":", "return", "self", "tip", "=", "\"running {0} @<{1}> error, return code {2}\"", ".", "format", "(", "\" \"", ".", "join", "(", "self", ".", "cmd", ")", ",", "self", ".", "cwd", ",", "self", ".", "return_code", ")", "logger", ".", "error", "(", "\"{0}\\nstdout:{1}\\nstderr:{2}\\n\"", ".", "format", "(", "tip", ",", "self", ".", "_stdout", ".", "decode", "(", "\"utf8\"", ")", ",", "self", ".", "_stderr", ".", "decode", "(", "\"utf8\"", ")", ")", ")", "raise", "ShCmdError", "(", "self", ")" ]
raise `ShCmdError` if the proc's return_code is not 0 otherwise return self ..Usage:: >>> proc = shcmd.run("ls").raise_for_error() >>> proc.return_code == 0 True
[ "raise", "ShCmdError", "if", "the", "proc", "s", "return_code", "is", "not", "0", "otherwise", "return", "self" ]
d8cad6311a4da7ef09f3419c86b58e30388b7ee3
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/proc.py#L128-L148
245,031
SkyLothar/shcmd
shcmd/proc.py
Proc._stream
def _stream(self):
    """execute subprocess with timeout

    Usage::

        >>> with cmd_proc.run_with_timeout() as cmd_proc:
        ...     stdout, stderr = cmd_proc.communicate()
        ...
        >>> assert cmd_proc.proc.return_code == 0, "proc exec failed"
    """
    timer = None
    try:
        proc = subprocess.Popen(
            self.cmd,
            cwd=self.cwd, env=self.env,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        timer = threading.Timer(
            self.timeout,
            kill_proc, [proc, self.cmd, time.time()]
        )
        timer.start()
        yield proc
    finally:
        if timer is not None:
            timer.cancel()
python
[ "def", "_stream", "(", "self", ")", ":", "timer", "=", "None", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "self", ".", "cmd", ",", "cwd", "=", "self", ".", "cwd", ",", "env", "=", "self", ".", "env", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "timer", "=", "threading", ".", "Timer", "(", "self", ".", "timeout", ",", "kill_proc", ",", "[", "proc", ",", "self", ".", "cmd", ",", "time", ".", "time", "(", ")", "]", ")", "timer", ".", "start", "(", ")", "yield", "proc", "finally", ":", "if", "timer", "is", "not", "None", ":", "timer", ".", "cancel", "(", ")" ]
execute subprocess with timeout Usage:: >>> with cmd_proc.run_with_timeout() as cmd_proc: ... stdout, stderr = cmd_proc.communicate() ... >>> assert cmd_proc.proc.return_code == 0, "proc exec failed"
[ "execute", "subprocess", "with", "timeout" ]
d8cad6311a4da7ef09f3419c86b58e30388b7ee3
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/proc.py#L151-L177
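Stripped of the class plumbing, the timer-based timeout reduces to a few standard-library lines; the sleep command and one-second limit are illustrative, and the example is POSIX-only:

import subprocess
import threading

proc = subprocess.Popen(['sleep', '10'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
timer = threading.Timer(1.0, proc.kill)  # SIGKILL after one second
timer.start()
try:
    stdout, stderr = proc.communicate()
finally:
    timer.cancel()
print('return code:', proc.returncode)  # -9 on POSIX when killed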
245,032
SkyLothar/shcmd
shcmd/proc.py
Proc.iter_lines
def iter_lines(self, warn_only=False):
    """yields stdout text, line by line."""
    remain = ""
    for data in self.iter_content(LINE_CHUNK_SIZE, warn_only=True):
        line_break_found = data[-1] in (b"\n", b"\r")
        lines = data.decode(self.codec).splitlines()
        lines[0] = remain + lines[0]
        if not line_break_found:
            remain = lines.pop()
        for line in lines:
            yield line
    if remain:
        yield remain
    self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
python
[ "def", "iter_lines", "(", "self", ",", "warn_only", "=", "False", ")", ":", "remain", "=", "\"\"", "for", "data", "in", "self", ".", "iter_content", "(", "LINE_CHUNK_SIZE", ",", "warn_only", "=", "True", ")", ":", "line_break_found", "=", "data", "[", "-", "1", "]", "in", "(", "b\"\\n\"", ",", "b\"\\r\"", ")", "lines", "=", "data", ".", "decode", "(", "self", ".", "codec", ")", ".", "splitlines", "(", ")", "lines", "[", "0", "]", "=", "remain", "+", "lines", "[", "0", "]", "if", "not", "line_break_found", ":", "remain", "=", "lines", ".", "pop", "(", ")", "for", "line", "in", "lines", ":", "yield", "line", "if", "remain", ":", "yield", "remain", "self", ".", "_state", "=", "FINISHED", "if", "not", "warn_only", ":", "self", ".", "raise_for_error", "(", ")" ]
yields stdout text, line by line.
[ "yields", "stdout", "text", "line", "by", "line", "." ]
d8cad6311a4da7ef09f3419c86b58e30388b7ee3
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/proc.py#L179-L195
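The remainder-buffer trick above generalises to any byte-chunk source. A standalone sketch follows; note it tests the trailing byte with endswith, which also behaves correctly under Python 3, where indexing bytes yields an int:

def lines_from_chunks(chunks, codec='utf8'):
    remain = ''
    for data in chunks:
        if not data:
            continue
        ends_with_break = data.endswith((b'\n', b'\r'))
        lines = data.decode(codec).splitlines()
        lines[0] = remain + lines[0]
        # An incomplete trailing line is carried into the next chunk.
        remain = '' if ends_with_break else lines.pop()
        for line in lines:
            yield line
    if remain:
        yield remain

print(list(lines_from_chunks([b'foo\nba', b'r\nbaz'])))
# ['foo', 'bar', 'baz']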
245,033
SkyLothar/shcmd
shcmd/proc.py
Proc.iter_content
def iter_content(self, chunk_size=1, warn_only=False):
    """
    yields stdout data, chunk by chunk

    :param chunk_size: size of each chunk (in bytes)
    """
    self._state = "not finished"
    if self.return_code is not None:
        stdout = io.BytesIO(self._stdout)
        data = stdout.read(chunk_size)
        while data:
            yield data
            data = stdout.read(chunk_size)
    else:
        data = b''
        started_at = time.time()
        with self._stream() as proc:
            while proc.poll() is None:
                chunk = proc.stdout.read(chunk_size)
                if not chunk:
                    continue
                yield chunk
                data += chunk
            if proc.returncode == -9:
                elapsed = time.time() - started_at
                self._state = "timeouted"
                raise subprocess.TimeoutExpired(proc.args, elapsed)
            chunk = proc.stdout.read(chunk_size)
            while chunk:
                yield chunk
                data += chunk
                chunk = proc.stdout.read(chunk_size)
        self._return_code = proc.returncode
        self._stderr = proc.stderr.read()
        self._stdout = data
    self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
python
[ "def", "iter_content", "(", "self", ",", "chunk_size", "=", "1", ",", "warn_only", "=", "False", ")", ":", "self", ".", "_state", "=", "\"not finished\"", "if", "self", ".", "return_code", "is", "not", "None", ":", "stdout", "=", "io", ".", "BytesIO", "(", "self", ".", "_stdout", ")", "data", "=", "stdout", ".", "read", "(", "chunk_size", ")", "while", "data", ":", "yield", "data", "data", "=", "stdout", ".", "read", "(", "chunk_size", ")", "else", ":", "data", "=", "b''", "started_at", "=", "time", ".", "time", "(", ")", "with", "self", ".", "_stream", "(", ")", "as", "proc", ":", "while", "proc", ".", "poll", "(", ")", "is", "None", ":", "chunk", "=", "proc", ".", "stdout", ".", "read", "(", "chunk_size", ")", "if", "not", "chunk", ":", "continue", "yield", "chunk", "data", "+=", "chunk", "if", "proc", ".", "returncode", "==", "-", "9", ":", "elapsed", "=", "time", ".", "time", "(", ")", "-", "started_at", "self", ".", "_state", "=", "\"timeouted\"", "raise", "subprocess", ".", "TimeoutExpired", "(", "proc", ".", "args", ",", "elapsed", ")", "chunk", "=", "proc", ".", "stdout", ".", "read", "(", "chunk_size", ")", "while", "chunk", ":", "yield", "chunk", "data", "+=", "chunk", "chunk", "=", "proc", ".", "stdout", ".", "read", "(", "chunk_size", ")", "self", ".", "_return_code", "=", "proc", ".", "returncode", "self", ".", "_stderr", "=", "proc", ".", "stderr", ".", "read", "(", ")", "self", ".", "_stdout", "=", "data", "self", ".", "_state", "=", "FINISHED", "if", "not", "warn_only", ":", "self", ".", "raise_for_error", "(", ")" ]
yields stdout data, chunk by chunk :param chunk_size: size of each chunk (in bytes)
[ "yields", "stdout", "data", "chunk", "by", "chunk" ]
d8cad6311a4da7ef09f3419c86b58e30388b7ee3
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/proc.py#L197-L238
245,034
SkyLothar/shcmd
shcmd/proc.py
Proc.block
def block(self, warn_only=False):
    """blocked execution."""
    self._state = "not finished"
    if self._return_code is None:
        proc = subprocess.Popen(
            self.cmd,
            cwd=self.cwd, env=self.env,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        self._stdout, self._stderr = proc.communicate(timeout=self.timeout)
        self._return_code = proc.returncode
    self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
python
[ "def", "block", "(", "self", ",", "warn_only", "=", "False", ")", ":", "self", ".", "_state", "=", "\"not finished\"", "if", "self", ".", "_return_code", "is", "None", ":", "proc", "=", "subprocess", ".", "Popen", "(", "self", ".", "cmd", ",", "cwd", "=", "self", ".", "cwd", ",", "env", "=", "self", ".", "env", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "self", ".", "_stdout", ",", "self", ".", "_stderr", "=", "proc", ".", "communicate", "(", "timeout", "=", "self", ".", "timeout", ")", "self", ".", "_return_code", "=", "proc", ".", "returncode", "self", ".", "_state", "=", "FINISHED", "if", "not", "warn_only", ":", "self", ".", "raise_for_error", "(", ")" ]
blocked execution.
[ "blocked", "executation", "." ]
d8cad6311a4da7ef09f3419c86b58e30388b7ee3
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/proc.py#L240-L254
245,035
etcher-be/emiz
emiz/avwx/summary.py
metar
def metar(trans: MetarTrans) -> str:
    """
    Condense the translation strings into a single report summary string
    """
    summary = []
    if trans.wind:
        summary.append('Winds ' + trans.wind)
    if trans.visibility:
        summary.append('Vis ' + trans.visibility[:trans.visibility.find(' (')].lower())
    if trans.temperature:
        summary.append('Temp ' + trans.temperature[:trans.temperature.find(' (')])
    if trans.dewpoint:
        summary.append('Dew ' + trans.dewpoint[:trans.dewpoint.find(' (')])
    if trans.altimeter:
        summary.append('Alt ' + trans.altimeter[:trans.altimeter.find(' (')])
    if trans.other:
        summary.append(trans.other)
    if trans.clouds:
        summary.append(trans.clouds.replace(' - Reported AGL', ''))
    return ', '.join(summary)
python
[ "def", "metar", "(", "trans", ":", "MetarTrans", ")", "->", "str", ":", "summary", "=", "[", "]", "if", "trans", ".", "wind", ":", "summary", ".", "append", "(", "'Winds '", "+", "trans", ".", "wind", ")", "if", "trans", ".", "visibility", ":", "summary", ".", "append", "(", "'Vis '", "+", "trans", ".", "visibility", "[", ":", "trans", ".", "visibility", ".", "find", "(", "' ('", ")", "]", ".", "lower", "(", ")", ")", "if", "trans", ".", "temperature", ":", "summary", ".", "append", "(", "'Temp '", "+", "trans", ".", "temperature", "[", ":", "trans", ".", "temperature", ".", "find", "(", "' ('", ")", "]", ")", "if", "trans", ".", "dewpoint", ":", "summary", ".", "append", "(", "'Dew '", "+", "trans", ".", "dewpoint", "[", ":", "trans", ".", "dewpoint", ".", "find", "(", "' ('", ")", "]", ")", "if", "trans", ".", "altimeter", ":", "summary", ".", "append", "(", "'Alt '", "+", "trans", ".", "altimeter", "[", ":", "trans", ".", "altimeter", ".", "find", "(", "' ('", ")", "]", ")", "if", "trans", ".", "other", ":", "summary", ".", "append", "(", "trans", ".", "other", ")", "if", "trans", ".", "clouds", ":", "summary", ".", "append", "(", "trans", ".", "clouds", ".", "replace", "(", "' - Reported AGL'", ",", "''", ")", ")", "return", "', '", ".", "join", "(", "summary", ")" ]
Condense the translation strings into a single report summary string
[ "Condense", "the", "translation", "strings", "into", "a", "single", "report", "summary", "string" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/summary.py#L9-L28
245,036
etcher-be/emiz
emiz/avwx/summary.py
taf
def taf(trans: TafLineTrans) -> str:
    """
    Condense the translation strings into a single forecast summary string
    """
    summary = []
    if trans.wind:
        summary.append('Winds ' + trans.wind)
    if trans.visibility:
        summary.append('Vis ' + trans.visibility[:trans.visibility.find(' (')].lower())
    if trans.altimeter:
        summary.append('Alt ' + trans.altimeter[:trans.altimeter.find(' (')])
    if trans.other:
        summary.append(trans.other)
    if trans.clouds:
        summary.append(trans.clouds.replace(' - Reported AGL', ''))
    if trans.wind_shear:
        summary.append(trans.wind_shear)
    if trans.turbulance:
        summary.append(trans.turbulance)
    if trans.icing:
        summary.append(trans.icing)
    return ', '.join(summary)
python
[ "def", "taf", "(", "trans", ":", "TafLineTrans", ")", "->", "str", ":", "summary", "=", "[", "]", "if", "trans", ".", "wind", ":", "summary", ".", "append", "(", "'Winds '", "+", "trans", ".", "wind", ")", "if", "trans", ".", "visibility", ":", "summary", ".", "append", "(", "'Vis '", "+", "trans", ".", "visibility", "[", ":", "trans", ".", "visibility", ".", "find", "(", "' ('", ")", "]", ".", "lower", "(", ")", ")", "if", "trans", ".", "altimeter", ":", "summary", ".", "append", "(", "'Alt '", "+", "trans", ".", "altimeter", "[", ":", "trans", ".", "altimeter", ".", "find", "(", "' ('", ")", "]", ")", "if", "trans", ".", "other", ":", "summary", ".", "append", "(", "trans", ".", "other", ")", "if", "trans", ".", "clouds", ":", "summary", ".", "append", "(", "trans", ".", "clouds", ".", "replace", "(", "' - Reported AGL'", ",", "''", ")", ")", "if", "trans", ".", "wind_shear", ":", "summary", ".", "append", "(", "trans", ".", "wind_shear", ")", "if", "trans", ".", "turbulance", ":", "summary", ".", "append", "(", "trans", ".", "turbulance", ")", "if", "trans", ".", "icing", ":", "summary", ".", "append", "(", "trans", ".", "icing", ")", "return", "', '", ".", "join", "(", "summary", ")" ]
Condense the translation strings into a single forecast summary string
[ "Condense", "the", "translation", "strings", "into", "a", "single", "forecast", "summary", "string" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/summary.py#L31-L52
245,037
ella/django-markup
djangomarkup/views.py
transform
def transform(request, syntax_processor_name=None, var_name="text"): """ Returns rendered HTML for source text """ if request.method != 'POST': return HttpResponseNotAllowed("Only POST allowed") source = request.POST.get(var_name) if not source: return HttpResponse('') processor = TextProcessor.objects.get(name=syntax_processor_name or getattr(settings, "DEFAULT_MARKUP", "markdown")) output = processor.convert(source) try: t = template.Template(output, name='markup_preview') output = t.render(template.Context({'MEDIA_URL' : settings.MEDIA_URL})) except template.TemplateSyntaxError, e: log.warning('Error in preview rendering: %s' % e) output = '<h3 style="color:red">%s</h3><p>%s</p>' % (ugettext('You have an errors in source text!'), e) return HttpResponse(output)
python
[ "def", "transform", "(", "request", ",", "syntax_processor_name", "=", "None", ",", "var_name", "=", "\"text\"", ")", ":", "if", "request", ".", "method", "!=", "'POST'", ":", "return", "HttpResponseNotAllowed", "(", "\"Only POST allowed\"", ")", "source", "=", "request", ".", "POST", ".", "get", "(", "var_name", ")", "if", "not", "source", ":", "return", "HttpResponse", "(", "''", ")", "processor", "=", "TextProcessor", ".", "objects", ".", "get", "(", "name", "=", "syntax_processor_name", "or", "getattr", "(", "settings", ",", "\"DEFAULT_MARKUP\"", ",", "\"markdown\"", ")", ")", "output", "=", "processor", ".", "convert", "(", "source", ")", "try", ":", "t", "=", "template", ".", "Template", "(", "output", ",", "name", "=", "'markup_preview'", ")", "output", "=", "t", ".", "render", "(", "template", ".", "Context", "(", "{", "'MEDIA_URL'", ":", "settings", ".", "MEDIA_URL", "}", ")", ")", "except", "template", ".", "TemplateSyntaxError", ",", "e", ":", "log", ".", "warning", "(", "'Error in preview rendering: %s'", "%", "e", ")", "output", "=", "'<h3 style=\"color:red\">%s</h3><p>%s</p>'", "%", "(", "ugettext", "(", "'You have an errors in source text!'", ")", ",", "e", ")", "return", "HttpResponse", "(", "output", ")" ]
Returns rendered HTML for source text
[ "Returns", "rendered", "HTML", "for", "source", "text" ]
45b4b60bc44f38f0a05b54173318951e951ca7ce
https://github.com/ella/django-markup/blob/45b4b60bc44f38f0a05b54173318951e951ca7ce/djangomarkup/views.py#L12-L33
245,038
sassoo/goldman
goldman/utils/model_helpers.py
rtype_to_model
def rtype_to_model(rtype):
    """ Return a model class object given a string resource type

    :param rtype: string resource type
    :return: model class object
    :raise: ValueError
    """
    models = goldman.config.MODELS

    for model in models:
        if rtype.lower() == model.RTYPE.lower():
            return model

    raise ValueError('%s resource type not registered' % rtype)
python
[ "def", "rtype_to_model", "(", "rtype", ")", ":", "models", "=", "goldman", ".", "config", ".", "MODELS", "for", "model", "in", "models", ":", "if", "rtype", ".", "lower", "(", ")", "==", "model", ".", "RTYPE", ".", "lower", "(", ")", ":", "return", "model", "raise", "ValueError", "(", "'%s resource type not registered'", "%", "rtype", ")" ]
Return a model class object given a string resource type :param rtype: string resource type :return: model class object :raise: ValueError
[ "Return", "a", "model", "class", "object", "given", "a", "string", "resource", "type" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/utils/model_helpers.py#L14-L29
245,039
walkermatt/spitslurp
spitslurp/__init__.py
spit
def spit(path, txt, encoding='UTF-8', append=False):
    """ Write a unicode string `txt` to file `path`.

    By default encoded as UTF-8 and truncates the file prior to writing

    Parameters
    ----------
    path : str
        File path to file on disk
    txt : unicode
        Text content to write to file
    encoding : str, default `UTF-8`, optional
        Encoding of the file
    append : Boolean, default False
        Append to file instead of truncating before writing

    Returns
    -------
    The txt written to the file as a unicode string
    """
    mode = 'a' if append else 'w'
    with io.open(path, mode, encoding=encoding) as f:
        f.write(txt)
    return txt
python
[ "def", "spit", "(", "path", ",", "txt", ",", "encoding", "=", "'UTF-8'", ",", "append", "=", "False", ")", ":", "mode", "=", "'a'", "if", "append", "else", "'w'", "with", "io", ".", "open", "(", "path", ",", "mode", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "f", ".", "write", "(", "txt", ")", "return", "txt" ]
Write a unicode string `txt` to file `path`. By default encoded as UTF-8 and truncates the file prior to writing Parameters ---------- path : str File path to file on disk txt : unicode Text content to write to file encoding : str, default `UTF-8`, optional Encoding of the file append : Boolean, default False Append to file instead of truncating before writing Returns ------- The txt written to the file as a unicode string
[ "Write", "a", "unicode", "string", "txt", "to", "file", "path", "." ]
8216400c59696b1ee0d708cc6e21a7a016966270
https://github.com/walkermatt/spitslurp/blob/8216400c59696b1ee0d708cc6e21a7a016966270/spitslurp/__init__.py#L4-L30
245,040
walkermatt/spitslurp
spitslurp/__init__.py
slurp
def slurp(path, encoding='UTF-8'):
    """ Reads file `path` and returns the entire contents as a unicode string

    By default assumes the file is encoded as UTF-8

    Parameters
    ----------
    path : str
        File path to file on disk
    encoding : str, default `UTF-8`, optional
        Encoding of the file

    Returns
    -------
    The txt read from the file as a unicode string
    """
    with io.open(path, 'r', encoding=encoding) as f:
        return f.read()
python
[ "def", "slurp", "(", "path", ",", "encoding", "=", "'UTF-8'", ")", ":", "with", "io", ".", "open", "(", "path", ",", "'r'", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Reads file `path` and returns the entire contents as a unicode string By default assumes the file is encoded as UTF-8 Parameters ---------- path : str File path to file on disk encoding : str, default `UTF-8`, optional Encoding of the file Returns ------- The txt read from the file as a unicode string
[ "Reads", "file", "path", "and", "returns", "the", "entire", "contents", "as", "a", "unicode", "string" ]
8216400c59696b1ee0d708cc6e21a7a016966270
https://github.com/walkermatt/spitslurp/blob/8216400c59696b1ee0d708cc6e21a7a016966270/spitslurp/__init__.py#L33-L52
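A quick roundtrip tying the two helpers together, assuming spit and slurp from the records above are in scope; the file name is illustrative:

path = 'example.txt'
spit(path, u'first line\n')                # truncate and write
spit(path, u'second line\n', append=True)  # append a second line
assert slurp(path) == u'first line\nsecond line\n'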
245,041
callowayproject/Transmogrify
transmogrify/utils.py
download_url
def download_url(url, destination):
    """
    Download an external URL to the destination
    """
    from settings import VALID_IMAGE_EXTENSIONS

    base_name, ext = os.path.splitext(url)
    ext = ext.lstrip('.')
    if ext not in VALID_IMAGE_EXTENSIONS:
        raise Exception("Invalid image extension")
    base_path, filename = os.path.split(destination)
    os.makedirs(base_path)
    urllib.urlretrieve(url, destination)
python
[ "def", "download_url", "(", "url", ",", "destination", ")", ":", "from", "settings", "import", "VALID_IMAGE_EXTENSIONS", "base_name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "url", ")", "ext", "=", "ext", ".", "lstrip", "(", "'.'", ")", "if", "ext", "not", "in", "VALID_IMAGE_EXTENSIONS", ":", "raise", "Exception", "(", "\"Invalid image extension\"", ")", "base_path", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "destination", ")", "os", ".", "makedirs", "(", "base_path", ")", "urllib", ".", "urlretrieve", "(", "url", ",", "destination", ")" ]
Download an external URL to the destination
[ "Download", "an", "external", "URL", "to", "the", "destination" ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/utils.py#L47-L60
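The record is Python 2 (urllib.urlretrieve, unconditional makedirs). A modern standard-library equivalent might look like the sketch below; the extension whitelist is illustrative:

import os
import urllib.request

VALID_IMAGE_EXTENSIONS = {'jpg', 'jpeg', 'png', 'gif'}

def download_url(url, destination):
    ext = os.path.splitext(url)[1].lstrip('.').lower()
    if ext not in VALID_IMAGE_EXTENSIONS:
        raise ValueError('Invalid image extension: %r' % ext)
    # exist_ok=True avoids an error when the directory already exists.
    os.makedirs(os.path.dirname(destination), exist_ok=True)
    urllib.request.urlretrieve(url, destination)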
245,042
callowayproject/Transmogrify
transmogrify/utils.py
create_securityhash
def create_securityhash(action_tuples):
    """
    Create a SHA1 hash based on the KEY and action string
    """
    from settings import SECRET_KEY

    action_string = "".join(["_%s%s" % a for a in action_tuples])
    security_hash = sha1(action_string + SECRET_KEY).hexdigest()
    return security_hash
python
[ "def", "create_securityhash", "(", "action_tuples", ")", ":", "from", "settings", "import", "SECRET_KEY", "action_string", "=", "\"\"", ".", "join", "(", "[", "\"_%s%s\"", "%", "a", "for", "a", "in", "action_tuples", "]", ")", "security_hash", "=", "sha1", "(", "action_string", "+", "SECRET_KEY", ")", ".", "hexdigest", "(", ")", "return", "security_hash" ]
Create a SHA1 hash based on the KEY and action string
[ "Create", "a", "SHA1", "hash", "based", "on", "the", "KEY", "and", "action", "string" ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/utils.py#L70-L78
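In Python 3 terms the same digest is built with hashlib (bytes in, hex out); the key and actions here are toys, and for new designs hmac.new(key, msg, 'sha1') would be the safer keyed-hash primitive:

import hashlib

SECRET_KEY = 'not-a-real-key'
action_tuples = [('r', '100x100'), ('c', '50x50')]

# "_r100x100_c50x50" followed by the key, hashed once.
action_string = ''.join('_%s%s' % a for a in action_tuples)
security_hash = hashlib.sha1(
    (action_string + SECRET_KEY).encode('utf8')).hexdigest()
print(security_hash)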
245,043
callowayproject/Transmogrify
transmogrify/utils.py
resolve_request_path
def resolve_request_path(requested_uri):
    """
    Check for any aliases and alter the path accordingly.

    Returns resolved_uri
    """
    from settings import PATH_ALIASES

    for key, val in PATH_ALIASES.items():
        if re.match(key, requested_uri):
            return re.sub(key, val, requested_uri)
    return requested_uri
python
[ "def", "resolve_request_path", "(", "requested_uri", ")", ":", "from", "settings", "import", "PATH_ALIASES", "for", "key", ",", "val", "in", "PATH_ALIASES", ".", "items", "(", ")", ":", "if", "re", ".", "match", "(", "key", ",", "requested_uri", ")", ":", "return", "re", ".", "sub", "(", "key", ",", "val", ",", "requested_uri", ")", "return", "requested_uri" ]
Check for any aliases and alter the path accordingly. Returns resolved_uri
[ "Check", "for", "any", "aliases", "and", "alter", "the", "path", "accordingly", "." ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/utils.py#L110-L121
245,044
callowayproject/Transmogrify
transmogrify/utils.py
get_cached_files
def get_cached_files(url, server_name="", document_root=None): """ Given a URL, return a list of paths of all cached variations of that file. Doesn't include the original file. """ import glob url_info = process_url(url, server_name, document_root, check_security=False) # get path to cache directory with basename of file (no extension) filedir = os.path.dirname(url_info['requested_file']) fileglob = '{0}*{1}'.format(url_info['base_filename'], url_info['ext']) return glob.glob(os.path.join(filedir, fileglob))
python
[ "def", "get_cached_files", "(", "url", ",", "server_name", "=", "\"\"", ",", "document_root", "=", "None", ")", ":", "import", "glob", "url_info", "=", "process_url", "(", "url", ",", "server_name", ",", "document_root", ",", "check_security", "=", "False", ")", "# get path to cache directory with basename of file (no extension)", "filedir", "=", "os", ".", "path", ".", "dirname", "(", "url_info", "[", "'requested_file'", "]", ")", "fileglob", "=", "'{0}*{1}'", ".", "format", "(", "url_info", "[", "'base_filename'", "]", ",", "url_info", "[", "'ext'", "]", ")", "return", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "filedir", ",", "fileglob", ")", ")" ]
Given a URL, return a list of paths of all cached variations of that file. Doesn't include the original file.
[ "Given", "a", "URL", "return", "a", "list", "of", "paths", "of", "all", "cached", "variations", "of", "that", "file", "." ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/utils.py#L252-L264
245,045
espenak/djangosenchatools
djangosenchatools/buildserver.py
BuildServerThread.run
def run(self):
    """
    Sets up the live server and databases, and then loops over handling
    http requests.
    """
    server_address = (self.host, self.port)
    threading = True
    if threading:
        httpd_cls = type('WSGIServer', (ThreadingMixIn, WSGIServer), {})
    else:
        httpd_cls = WSGIServer
    self.httpd = httpd_cls(server_address, WSGIRequestHandler, ipv6=False)
    wsgi_handler = get_internal_wsgi_application()
    self.httpd.set_app(wsgi_handler)
    self.is_ready.set()
    self.httpd.serve_forever()
python
[ "def", "run", "(", "self", ")", ":", "server_address", "=", "(", "self", ".", "host", ",", "self", ".", "port", ")", "threading", "=", "True", "if", "threading", ":", "httpd_cls", "=", "type", "(", "'WSGIServer'", ",", "(", "ThreadingMixIn", ",", "WSGIServer", ")", ",", "{", "}", ")", "else", ":", "httpd_cls", "=", "WSGIServer", "self", ".", "httpd", "=", "httpd_cls", "(", "server_address", ",", "WSGIRequestHandler", ",", "ipv6", "=", "False", ")", "wsgi_handler", "=", "get_internal_wsgi_application", "(", ")", "self", ".", "httpd", ".", "set_app", "(", "wsgi_handler", ")", "self", ".", "is_ready", ".", "set", "(", ")", "self", ".", "httpd", ".", "serve_forever", "(", ")" ]
Sets up the live server and databases, and then loops over handling http requests.
[ "Sets", "up", "the", "live", "server", "and", "databases", "and", "then", "loops", "over", "handling", "http", "requests", "." ]
da1bca9365300de303e833de4b4bd57671c1d11a
https://github.com/espenak/djangosenchatools/blob/da1bca9365300de303e833de4b4bd57671c1d11a/djangosenchatools/buildserver.py#L23-L38
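The type() call above mixes threading into Django's WSGIServer at runtime. The same trick works with the standard library's wsgiref, which makes for a self-contained demo:

from socketserver import ThreadingMixIn
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, demo_app

# Build a threading server class on the fly, as the record does.
ThreadedWSGIServer = type('ThreadedWSGIServer',
                          (ThreadingMixIn, WSGIServer), {})

httpd = ThreadedWSGIServer(('127.0.0.1', 8000), WSGIRequestHandler)
httpd.set_app(demo_app)
print('serving on http://127.0.0.1:8000 ...')
httpd.serve_forever()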
245,046
twidi/py-dataql
dataql/parsers/base.py
BaseParser.visit
def visit(self, node):
    """Rewrite original method to use lower-case method, and not "generic" function."""
    try:
        # Get the "visit_%s" method, using the lower case version of the rule.
        method = getattr(self, 'visit_%s' % node.expr_name.lower())
    except AttributeError:
        # If the method is not defined, we do nothing for this node.
        return

    # Below is untouched code from the original ``visit`` method.

    # Call that method, and show where in the tree it failed if it blows up.
    try:
        return method(node, [self.visit(n) for n in node])
    except (VisitationError, UndefinedLabel):
        # Don't catch and re-wrap already-wrapped exceptions.
        raise
    except self.unwrapped_exceptions:
        raise
    except Exception:
        # Catch any exception, and tack on a parse tree so it's easier to
        # see where it went wrong.
        exc_class, exc, trace = sys.exc_info()
        raise VisitationError(exc, exc_class, node).with_traceback(trace)
python
[ "def", "visit", "(", "self", ",", "node", ")", ":", "try", ":", "# Get the \"visit_%s\" method, using the lower case version of the rule.", "method", "=", "getattr", "(", "self", ",", "'visit_%s'", "%", "node", ".", "expr_name", ".", "lower", "(", ")", ")", "except", "AttributeError", ":", "# If the method is not defined, we do nothing for this node.", "return", "# Below is untouched code from the original ``visit`` method.", "# Call that method, and show where in the tree it failed if it blows up.", "try", ":", "return", "method", "(", "node", ",", "[", "self", ".", "visit", "(", "n", ")", "for", "n", "in", "node", "]", ")", "except", "(", "VisitationError", ",", "UndefinedLabel", ")", ":", "# Don't catch and re-wrap already-wrapped exceptions.", "raise", "except", "self", ".", "unwrapped_exceptions", ":", "raise", "except", "Exception", ":", "# Catch any exception, and tack on a parse tree so it's easier to", "# see where it went wrong.", "exc_class", ",", "exc", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "raise", "VisitationError", "(", "exc", ",", "exc_class", ",", "node", ")", ".", "with_traceback", "(", "trace", ")" ]
Rewrite original method to use lower-case method, and not "generic" function.
[ "Rewrite", "original", "method", "to", "use", "lower", "-", "case", "method", "and", "not", "generic", "function", "." ]
5841a3fd559829193ed709c255166085bdde1c52
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/base.py#L271-L295
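The dispatch idea, getattr on a lower-cased rule name with a silent no-op fallback, can be shown without parsimonious; Node here is a stand-in for its tree nodes:

class Node:
    def __init__(self, expr_name, children=()):
        self.expr_name = expr_name
        self.children = list(children)

class Visitor:
    def visit(self, node):
        method = getattr(self, 'visit_%s' % node.expr_name.lower(), None)
        if method is None:
            return  # no handler for this rule: do nothing
        # Visit children first, then hand their results to the handler.
        return method(node, [self.visit(n) for n in node.children])

class Demo(Visitor):
    def visit_str(self, node, children):
        return 'handled STR with %d children' % len(children)

print(Demo().visit(Node('STR')))  # handled STR with 0 children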
245,047
twidi/py-dataql
dataql/parsers/base.py
BaseParser.visit_str
def visit_str(self, node, _):
    """Regex rule for quoted string allowing escaped quotes inside.

    Arguments
    ---------
    node : parsimonious.nodes.Node.
    _ (children) : list, unused

    Result
    ------
    str
        The wanted string, with quoted characters unquoted.

    Example
    -------

    >>> BaseParser('"foo"', default_rule='STR').data
    'foo'
    >>> BaseParser("'foo'", default_rule='STR').data
    'foo'
    >>> BaseParser('''"foo b'ar"''', default_rule='STR').data
    "foo b'ar"
    >>> BaseParser('''"foo b\'ar"''', default_rule='STR').data
    "foo b'ar"
    >>> BaseParser(r"'foo b\\'ar'", default_rule='STR').data
    "foo b'ar"

    Notes
    -----
    The regex works this way:
    Two quotes (single or double, the starting one and the ending one
    should be the same) surrounding zero or more of "any character that's
    not a quote (same as the starting/ending ones) or a backslash" or
    "a backslash followed by any character".
    """
    # remove surrounding quotes and remove single backslashes
    return self.visit_str.re_single_backslash.sub('', node.text[1:-1])
python
[ "def", "visit_str", "(", "self", ",", "node", ",", "_", ")", ":", "# remove surrounding quotes and remove single backslashes", "return", "self", ".", "visit_str", ".", "re_single_backslash", ".", "sub", "(", "''", ",", "node", ".", "text", "[", "1", ":", "-", "1", "]", ")" ]
Regex rule for quoted string allowing escaped quotes inside. Arguments --------- node : parsimonious.nodes.Node. _ (children) : list, unused Result ------ str The wanted string, with quoted characters unquoted. Example ------- >>> BaseParser('"foo"', default_rule='STR').data 'foo' >>> BaseParser("'foo'", default_rule='STR').data 'foo' >>> BaseParser('''"foo b'ar"''', default_rule='STR').data "foo b'ar" >>> BaseParser('''"foo b\'ar"''', default_rule='STR').data "foo b'ar" >>> BaseParser(r"'foo b\\'ar'", default_rule='STR').data "foo b'ar" Notes ----- The regex works this way: Two quotes (single or double, the starting one and the ending one should be the same) surrounding zero or more of "any character that's not a quote (same as the starting/ending ones) or a backslash" or "a backslash followed by any character".
[ "Regex", "rule", "for", "quoted", "string", "allowing", "escaped", "quotes", "inside", "." ]
5841a3fd559829193ed709c255166085bdde1c52
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/parsers/base.py#L406-L444
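The Notes section describes the classic escaped-string regex. Sketching the double-quoted variant with the standard library (a mirrored alternative would cover single quotes):

import re

# A double quote, then any run of non-quote/non-backslash characters
# or backslash-escaped characters, then the closing double quote.
DQ_STRING = re.compile(r'"((?:[^"\\]|\\.)*)"')

def unquote(text):
    body = DQ_STRING.match(text).group(1)
    return re.sub(r'\\(.)', r'\1', body)  # drop single escaping backslashes

print(unquote('"foo b\\"ar"'))  # foo b"ar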
245,048
nefarioustim/parker
parker/fileops.py
get_chunk_path_from_string
def get_chunk_path_from_string(string, chunk=3):
    """Return a chunked path from string."""
    return os.path.join(
        *list(generate_chunks(
            string, chunk
        ))
    )
python
[ "def", "get_chunk_path_from_string", "(", "string", ",", "chunk", "=", "3", ")", ":", "return", "os", ".", "path", ".", "join", "(", "*", "list", "(", "generate_chunks", "(", "string", ",", "chunk", ")", ")", ")" ]
Return a chunked path from string.
[ "Return", "a", "chunked", "path", "from", "string", "." ]
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/fileops.py#L60-L67
245,049
RonenNess/Fileter
fileter/sources/folder_source.py
FolderSource.next
def next(self):
    """
    Return all files in folder.
    """
    # get depth of starting root directory
    base_depth = self.__root.count(os.path.sep)

    # walk files and folders
    for root, subFolders, files in os.walk(self.__root):

        # apply folder filter
        if not self.filter_folder(root):
            continue

        # make sure we don't pass depth limit
        if self.__depth_limit is not None:
            curr_depth = root.count(os.path.sep)
            if curr_depth - base_depth > self.__depth_limit:
                continue

        # if need to return folders return it
        if self.__ret_folders:
            yield root

        # return files
        if self.__ret_files:
            for f in files:
                yield os.path.join(root, f)

    # end iterator
    raise StopIteration
python
def next(self): """ Return all files in folder. """ # get depth of starting root directory base_depth = self.__root.count(os.path.sep) # walk files and folders for root, subFolders, files in os.walk(self.__root): # apply folder filter if not self.filter_folder(root): continue # make sure we don't pass depth limit if self.__depth_limit is not None: curr_depth = root.count(os.path.sep) if curr_depth - base_depth > self.__depth_limit: continue # if need to return folders return it if self.__ret_folders: yield root # return files if self.__ret_files: for f in files: yield os.path.join(root, f) # end iterator raise StopIteration
[ "def", "next", "(", "self", ")", ":", "# get depth of starting root directory", "base_depth", "=", "self", ".", "__root", ".", "count", "(", "os", ".", "path", ".", "sep", ")", "# walk files and folders", "for", "root", ",", "subFolders", ",", "files", "in", "os", ".", "walk", "(", "self", ".", "__root", ")", ":", "# apply folder filter", "if", "not", "self", ".", "filter_folder", "(", "root", ")", ":", "continue", "# make sure we don't pass depth limit", "if", "self", ".", "__depth_limit", "is", "not", "None", ":", "curr_depth", "=", "root", ".", "count", "(", "os", ".", "path", ".", "sep", ")", "if", "curr_depth", "-", "base_depth", ">", "self", ".", "__depth_limit", ":", "continue", "# if need to return folders return it", "if", "self", ".", "__ret_folders", ":", "yield", "root", "# return files", "if", "self", ".", "__ret_files", ":", "for", "f", "in", "files", ":", "yield", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", "# end iterator", "raise", "StopIteration" ]
Return all files in folder.
[ "Return", "all", "files", "in", "folder", "." ]
5372221b4049d5d46a9926573b91af17681c81f3
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/sources/folder_source.py#L41-L71
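Two notes on the record above. First, the trailing ``raise StopIteration`` is a Python 2 idiom; under PEP 479 (Python 3.7+) it surfaces as a ``RuntimeError`` inside a generator, and a bare ``return`` is the modern equivalent. Second, the depth limit is computed by counting path separators rather than tracking recursion depth; a standalone illustration of that arithmetic (the directory names are invented):

import os

root = os.path.join("data", "photos")
base_depth = root.count(os.path.sep)          # 1 on POSIX

current = os.path.join("data", "photos", "2020", "summer")
curr_depth = current.count(os.path.sep)       # 3 on POSIX

depth_limit = 1
print(curr_depth - base_depth > depth_limit)  # True: 'summer' is too deep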
245,050
quasipedia/swaggery
examples/vetinari/vetinari.py
Clock.ticks
def ticks( cls, request, length: (Ptypes.path, Integer('Duration of the stream, in seconds.')), style: (Ptypes.path, String('Tick style.', enum=['compact', 'extended'])) ) -> [ (200, 'Ok', TickStream), (400, 'Invalid parameters') ]: '''A streaming Lord Vetinari clock...''' try: length = int(length) style = cls._styles[style] except (ValueError, KeyError): Respond(400) def vetinari_clock(): start = time() while time() - start <= length: sleep(randint(25, 400) / 100) yield strftime(style, localtime()) Respond(200, vetinari_clock())
python
def ticks( cls, request, length: (Ptypes.path, Integer('Duration of the stream, in seconds.')), style: (Ptypes.path, String('Tick style.', enum=['compact', 'extended'])) ) -> [ (200, 'Ok', TickStream), (400, 'Invalid parameters') ]: '''A streaming Lord Vetinari clock...''' try: length = int(length) style = cls._styles[style] except (ValueError, KeyError): Respond(400) def vetinari_clock(): start = time() while time() - start <= length: sleep(randint(25, 400) / 100) yield strftime(style, localtime()) Respond(200, vetinari_clock())
[ "def", "ticks", "(", "cls", ",", "request", ",", "length", ":", "(", "Ptypes", ".", "path", ",", "Integer", "(", "'Duration of the stream, in seconds.'", ")", ")", ",", "style", ":", "(", "Ptypes", ".", "path", ",", "String", "(", "'Tick style.'", ",", "enum", "=", "[", "'compact'", ",", "'extended'", "]", ")", ")", ")", "->", "[", "(", "200", ",", "'Ok'", ",", "TickStream", ")", ",", "(", "400", ",", "'Invalid parameters'", ")", "]", ":", "try", ":", "length", "=", "int", "(", "length", ")", "style", "=", "cls", ".", "_styles", "[", "style", "]", "except", "(", "ValueError", ",", "KeyError", ")", ":", "Respond", "(", "400", ")", "def", "vetinari_clock", "(", ")", ":", "start", "=", "time", "(", ")", "while", "time", "(", ")", "-", "start", "<=", "length", ":", "sleep", "(", "randint", "(", "25", ",", "400", ")", "/", "100", ")", "yield", "strftime", "(", "style", ",", "localtime", "(", ")", ")", "Respond", "(", "200", ",", "vetinari_clock", "(", ")", ")" ]
A streaming Lord Vetinari clock...
[ "A", "streaming", "Lord", "Vetinari", "clock", "..." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/examples/vetinari/vetinari.py#L39-L58
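The class-level ``_styles`` mapping the record indexes into is not shown; a self-contained sketch of the same streaming idea, with invented ``strftime`` formats standing in for the 'compact' and 'extended' styles:

from random import randint
from time import localtime, sleep, strftime, time

STYLES = {"compact": "%H:%M:%S", "extended": "%A %H:%M:%S"}  # assumed formats

def vetinari_clock(length, style="compact"):
    # Ticks arrive at irregular intervals, like the clock in Vetinari's anteroom.
    fmt = STYLES[style]
    start = time()
    while time() - start <= length:
        sleep(randint(25, 400) / 100)  # 0.25 to 4.0 seconds between ticks
        yield strftime(fmt, localtime())

for tick in vetinari_clock(2):
    print(tick)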
245,051
carlosp420/dataset-creator
dataset_creator/mega.py
MegaDatasetBlock.convert_blocks_to_string
def convert_blocks_to_string(self): """ New method, only in MegaDatasetBlock class. :return: flattened data blocks as string """ taxa_ids = [[]] * int(self.data.number_taxa) sequences = [''] * int(self.data.number_taxa) for block in self._blocks: for index, seq_record in enumerate(block): taxa_ids[index] = '{0}_{1}_{2}'.format(seq_record.voucher_code, seq_record.taxonomy['genus'], seq_record.taxonomy['species'], ) sequence = get_seq(seq_record, self.codon_positions, aminoacids=self.aminoacids, degenerate=self.degenerate) sequences[index] += sequence.seq if sequence.warning: self.warnings.append(sequence.warning) out = '' for index, value in enumerate(taxa_ids): out += '#{0}\n{1}\n'.format(taxa_ids[index], sequences[index]) return out
python
def convert_blocks_to_string(self): """ New method, only in MegaDatasetBlock class. :return: flattened data blocks as string """ taxa_ids = [[]] * int(self.data.number_taxa) sequences = [''] * int(self.data.number_taxa) for block in self._blocks: for index, seq_record in enumerate(block): taxa_ids[index] = '{0}_{1}_{2}'.format(seq_record.voucher_code, seq_record.taxonomy['genus'], seq_record.taxonomy['species'], ) sequence = get_seq(seq_record, self.codon_positions, aminoacids=self.aminoacids, degenerate=self.degenerate) sequences[index] += sequence.seq if sequence.warning: self.warnings.append(sequence.warning) out = '' for index, value in enumerate(taxa_ids): out += '#{0}\n{1}\n'.format(taxa_ids[index], sequences[index]) return out
[ "def", "convert_blocks_to_string", "(", "self", ")", ":", "taxa_ids", "=", "[", "[", "]", "]", "*", "int", "(", "self", ".", "data", ".", "number_taxa", ")", "sequences", "=", "[", "''", "]", "*", "int", "(", "self", ".", "data", ".", "number_taxa", ")", "for", "block", "in", "self", ".", "_blocks", ":", "for", "index", ",", "seq_record", "in", "enumerate", "(", "block", ")", ":", "taxa_ids", "[", "index", "]", "=", "'{0}_{1}_{2}'", ".", "format", "(", "seq_record", ".", "voucher_code", ",", "seq_record", ".", "taxonomy", "[", "'genus'", "]", ",", "seq_record", ".", "taxonomy", "[", "'species'", "]", ",", ")", "sequence", "=", "get_seq", "(", "seq_record", ",", "self", ".", "codon_positions", ",", "aminoacids", "=", "self", ".", "aminoacids", ",", "degenerate", "=", "self", ".", "degenerate", ")", "sequences", "[", "index", "]", "+=", "sequence", ".", "seq", "if", "sequence", ".", "warning", ":", "self", ".", "warnings", ".", "append", "(", "sequence", ".", "warning", ")", "out", "=", "''", "for", "index", ",", "value", "in", "enumerate", "(", "taxa_ids", ")", ":", "out", "+=", "'#{0}\\n{1}\\n'", ".", "format", "(", "taxa_ids", "[", "index", "]", ",", "sequences", "[", "index", "]", ")", "return", "out" ]
New method, only in MegaDatasetBlock class. :return: flattened data blocks as string
[ "New", "method", "only", "in", "MegaDatasetBlock", "class", "." ]
ea27340b145cb566a36c1836ff42263f1b2003a0
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/mega.py#L10-L35
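One detail worth flagging in the record above: ``taxa_ids = [[]] * int(...)`` creates N references to one shared empty list. The code is safe only because every slot is immediately rebound to a string; mutating the slots instead of rebinding them would be a bug:

n = 3
taxa_ids = [[]] * n        # three names for ONE list
taxa_ids[0].append("x")    # mutation shows up everywhere
print(taxa_ids)            # [['x'], ['x'], ['x']]

safe = [[] for _ in range(n)]  # independent lists
safe[0].append("x")
print(safe)                    # [['x'], [], []]

taxa_ids = [[]] * n
taxa_ids[0] = "id_0"       # rebinding (what the record does) is fine
print(taxa_ids)            # ['id_0', [], []]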
245,052
etcher-be/emiz
emiz/avwx/metar.py
parse
def parse(station: str, txt: str) -> (MetarData, Units): # type: ignore """ Returns MetarData and Units dataclasses with parsed data and their associated units """ core.valid_station(station) return parse_na(txt) if core.uses_na_format(station[:2]) else parse_in(txt)
python
def parse(station: str, txt: str) -> (MetarData, Units): # type: ignore """ Returns MetarData and Units dataclasses with parsed data and their associated units """ core.valid_station(station) return parse_na(txt) if core.uses_na_format(station[:2]) else parse_in(txt)
[ "def", "parse", "(", "station", ":", "str", ",", "txt", ":", "str", ")", "->", "(", "MetarData", ",", "Units", ")", ":", "# type: ignore", "core", ".", "valid_station", "(", "station", ")", "return", "parse_na", "(", "txt", ")", "if", "core", ".", "uses_na_format", "(", "station", "[", ":", "2", "]", ")", "else", "parse_in", "(", "txt", ")" ]
Returns MetarData and Units dataclasses with parsed data and their associated units
[ "Returns", "MetarData", "and", "Units", "dataclasses", "with", "parsed", "data", "and", "their", "associated", "units" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/metar.py#L23-L28
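A usage sketch for the dispatcher above; the import path is inferred from the record's module path and not verified, and the report text is made up:

from emiz.avwx import metar  # import path inferred from the record

report = "KJFK 241051Z 33016KT 10SM FEW250 M01/M14 A3028"  # invented report
data, units = metar.parse("KJFK", report)
# flight_rules and visibility are fields set by parse_na (next record).
print(data.flight_rules, data.visibility)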
245,053
etcher-be/emiz
emiz/avwx/metar.py
parse_na
def parse_na(txt: str) -> (MetarData, Units): # type: ignore """ Parser for the North American METAR variant """ units = Units(**NA_UNITS) # type: ignore clean = core.sanitize_report_string(txt) wxresp = {'raw': txt, 'sanitized': clean} wxdata, wxresp['remarks'] = core.get_remarks(clean) wxdata, wxresp['runway_visibility'], _ = core.sanitize_report_list(wxdata) wxdata, wxresp['station'], wxresp['time'] = core.get_station_and_time(wxdata) wxdata, wxresp['clouds'] = core.get_clouds(wxdata) wxdata, wxresp['wind_direction'], wxresp['wind_speed'], \ wxresp['wind_gust'], wxresp['wind_variable_direction'] = core.get_wind(wxdata, units) wxdata, wxresp['altimeter'] = core.get_altimeter(wxdata, units, 'NA') wxdata, wxresp['visibility'] = core.get_visibility(wxdata, units) wxresp['other'], wxresp['temperature'], wxresp['dewpoint'] = core.get_temp_and_dew(wxdata) condition = core.get_flight_rules(wxresp['visibility'], core.get_ceiling(wxresp['clouds'])) # type: ignore wxresp['flight_rules'] = FLIGHT_RULES[condition] wxresp['remarks_info'] = remarks.parse(wxresp['remarks']) # type: ignore wxresp['time'] = core.make_timestamp(wxresp['time']) # type: ignore return MetarData(**wxresp), units
python
def parse_na(txt: str) -> (MetarData, Units): # type: ignore """ Parser for the North American METAR variant """ units = Units(**NA_UNITS) # type: ignore clean = core.sanitize_report_string(txt) wxresp = {'raw': txt, 'sanitized': clean} wxdata, wxresp['remarks'] = core.get_remarks(clean) wxdata, wxresp['runway_visibility'], _ = core.sanitize_report_list(wxdata) wxdata, wxresp['station'], wxresp['time'] = core.get_station_and_time(wxdata) wxdata, wxresp['clouds'] = core.get_clouds(wxdata) wxdata, wxresp['wind_direction'], wxresp['wind_speed'], \ wxresp['wind_gust'], wxresp['wind_variable_direction'] = core.get_wind(wxdata, units) wxdata, wxresp['altimeter'] = core.get_altimeter(wxdata, units, 'NA') wxdata, wxresp['visibility'] = core.get_visibility(wxdata, units) wxresp['other'], wxresp['temperature'], wxresp['dewpoint'] = core.get_temp_and_dew(wxdata) condition = core.get_flight_rules(wxresp['visibility'], core.get_ceiling(wxresp['clouds'])) # type: ignore wxresp['flight_rules'] = FLIGHT_RULES[condition] wxresp['remarks_info'] = remarks.parse(wxresp['remarks']) # type: ignore wxresp['time'] = core.make_timestamp(wxresp['time']) # type: ignore return MetarData(**wxresp), units
[ "def", "parse_na", "(", "txt", ":", "str", ")", "->", "(", "MetarData", ",", "Units", ")", ":", "# type: ignore", "units", "=", "Units", "(", "*", "*", "NA_UNITS", ")", "# type: ignore", "clean", "=", "core", ".", "sanitize_report_string", "(", "txt", ")", "wxresp", "=", "{", "'raw'", ":", "txt", ",", "'sanitized'", ":", "clean", "}", "wxdata", ",", "wxresp", "[", "'remarks'", "]", "=", "core", ".", "get_remarks", "(", "clean", ")", "wxdata", ",", "wxresp", "[", "'runway_visibility'", "]", ",", "_", "=", "core", ".", "sanitize_report_list", "(", "wxdata", ")", "wxdata", ",", "wxresp", "[", "'station'", "]", ",", "wxresp", "[", "'time'", "]", "=", "core", ".", "get_station_and_time", "(", "wxdata", ")", "wxdata", ",", "wxresp", "[", "'clouds'", "]", "=", "core", ".", "get_clouds", "(", "wxdata", ")", "wxdata", ",", "wxresp", "[", "'wind_direction'", "]", ",", "wxresp", "[", "'wind_speed'", "]", ",", "wxresp", "[", "'wind_gust'", "]", ",", "wxresp", "[", "'wind_variable_direction'", "]", "=", "core", ".", "get_wind", "(", "wxdata", ",", "units", ")", "wxdata", ",", "wxresp", "[", "'altimeter'", "]", "=", "core", ".", "get_altimeter", "(", "wxdata", ",", "units", ",", "'NA'", ")", "wxdata", ",", "wxresp", "[", "'visibility'", "]", "=", "core", ".", "get_visibility", "(", "wxdata", ",", "units", ")", "wxresp", "[", "'other'", "]", ",", "wxresp", "[", "'temperature'", "]", ",", "wxresp", "[", "'dewpoint'", "]", "=", "core", ".", "get_temp_and_dew", "(", "wxdata", ")", "condition", "=", "core", ".", "get_flight_rules", "(", "wxresp", "[", "'visibility'", "]", ",", "core", ".", "get_ceiling", "(", "wxresp", "[", "'clouds'", "]", ")", ")", "# type: ignore", "wxresp", "[", "'flight_rules'", "]", "=", "FLIGHT_RULES", "[", "condition", "]", "wxresp", "[", "'remarks_info'", "]", "=", "remarks", ".", "parse", "(", "wxresp", "[", "'remarks'", "]", ")", "# type: ignore", "wxresp", "[", "'time'", "]", "=", "core", ".", "make_timestamp", "(", "wxresp", "[", "'time'", "]", ")", "# type: ignore", "return", "MetarData", "(", "*", "*", "wxresp", ")", ",", "units" ]
Parser for the North American METAR variant
[ "Parser", "for", "the", "North", "American", "METAR", "variant" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/metar.py#L31-L51
245,054
GemHQ/round-py
round/wrappers.py
MFAable.with_mfa
def with_mfa(self, mfa_token): """Set the MFA token for the next request. `mfa_token`s are only good for one request. Use this method to chain into the protected action you want to perform. Note: Only useful for Application authentication. Usage: account.with_mfa(application.totp.now()).pay(...) Args: mfa_token (str/function, optional): TOTP token for the Application OR a callable/function which will generate such a token when called. Returns: self """ if hasattr(mfa_token, '__call__'): # callable() is unsupported by 3.1 and 3.2 self.context.mfa_token = mfa_token.__call__() else: self.context.mfa_token = mfa_token return self
python
def with_mfa(self, mfa_token): """Set the MFA token for the next request. `mfa_token`s are only good for one request. Use this method to chain into the protected action you want to perform. Note: Only useful for Application authentication. Usage: account.with_mfa(application.totp.now()).pay(...) Args: mfa_token (str/function, optional): TOTP token for the Application OR a callable/function which will generate such a token when called. Returns: self """ if hasattr(mfa_token, '__call__'): # callable() is unsupported by 3.1 and 3.2 self.context.mfa_token = mfa_token.__call__() else: self.context.mfa_token = mfa_token return self
[ "def", "with_mfa", "(", "self", ",", "mfa_token", ")", ":", "if", "hasattr", "(", "mfa_token", ",", "'__call__'", ")", ":", "# callable() is unsupported by 3.1 and 3.2", "self", ".", "context", ".", "mfa_token", "=", "mfa_token", ".", "__call__", "(", ")", "else", ":", "self", ".", "context", ".", "mfa_token", "=", "mfa_token", "return", "self" ]
Set the MFA token for the next request. `mfa_token`s are only good for one request. Use this method to chain into the protected action you want to perform. Note: Only useful for Application authentication. Usage: account.with_mfa(application.totp.now()).pay(...) Args: mfa_token (str/function, optional): TOTP token for the Application OR a callable/function which will generate such a token when called. Returns: self
[ "Set", "the", "MFA", "token", "for", "the", "next", "request", ".", "mfa_token", "s", "are", "only", "good", "for", "one", "request", ".", "Use", "this", "method", "to", "chain", "into", "the", "protected", "action", "you", "want", "to", "perform", "." ]
d0838f849cd260b1eb5df67ed3c6f2fe56c91c21
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/wrappers.py#L37-L57
245,055
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
resolved_row
def resolved_row(objs, geomatcher): """Temporarily insert ``RoW`` into ``geomatcher.topology``, defined by the topo faces not used in ``objs``. Will overwrite any existing ``RoW``. On exiting the context manager, ``RoW`` is deleted.""" def get_locations(lst): for elem in lst: try: yield elem['location'] except TypeError: yield elem geomatcher['RoW'] = geomatcher.faces.difference( reduce( set.union, [geomatcher[obj] for obj in get_locations(objs)] ) ) yield geomatcher del geomatcher['RoW']
python
def resolved_row(objs, geomatcher): """Temporarily insert ``RoW`` into ``geomatcher.topology``, defined by the topo faces not used in ``objs``. Will overwrite any existing ``RoW``. On exiting the context manager, ``RoW`` is deleted.""" def get_locations(lst): for elem in lst: try: yield elem['location'] except TypeError: yield elem geomatcher['RoW'] = geomatcher.faces.difference( reduce( set.union, [geomatcher[obj] for obj in get_locations(objs)] ) ) yield geomatcher del geomatcher['RoW']
[ "def", "resolved_row", "(", "objs", ",", "geomatcher", ")", ":", "def", "get_locations", "(", "lst", ")", ":", "for", "elem", "in", "lst", ":", "try", ":", "yield", "elem", "[", "'location'", "]", "except", "TypeError", ":", "yield", "elem", "geomatcher", "[", "'RoW'", "]", "=", "geomatcher", ".", "faces", ".", "difference", "(", "reduce", "(", "set", ".", "union", ",", "[", "geomatcher", "[", "obj", "]", "for", "obj", "in", "get_locations", "(", "objs", ")", "]", ")", ")", "yield", "geomatcher", "del", "geomatcher", "[", "'RoW'", "]" ]
Temporarily insert ``RoW`` into ``geomatcher.topology``, defined by the topo faces not used in ``objs``. Will overwrite any existing ``RoW``. On exiting the context manager, ``RoW`` is deleted.
[ "Temporarily", "insert", "RoW", "into", "geomatcher", ".", "topology", "defined", "by", "the", "topo", "faces", "not", "used", "in", "objs", "." ]
d38d7e8d5bf943a6499f3000004f1953af5970de
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L260-L280
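The generator above is evidently meant to be wrapped with ``contextlib.contextmanager`` (the decorator sits outside this record), which makes the intended usage roughly the following; the import path is assumed, not verified:

from constructive_geometries import Geomatcher, resolved_row  # imports assumed

geomatcher = Geomatcher()
datasets = [{"location": "DE"}, {"location": "FR"}, "CH"]  # dicts or bare codes

with resolved_row(datasets, geomatcher) as g:
    # Inside the block 'RoW' is every topology face not covered by DE, FR,
    # or CH, so spatial queries against it behave like any other location.
    row_faces = g["RoW"]
    print(len(row_faces) > 0)
# On exit the temporary 'RoW' definition is deleted again.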
245,056
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
Geomatcher._actual_key
def _actual_key(self, key): """Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``.""" if key in self or key in ("RoW", "GLO"): return key elif (self.default_namespace, key) in self: return (self.default_namespace, key) if isinstance(key, str) and self.coco: new = coco.convert(names=[key], to='ISO2', not_found=None) if new in self: if new not in self.__seen: self.__seen.add(key) print("Geomatcher: Used '{}' for '{}'".format(new, key)) return new raise KeyError("Can't find this location")
python
def _actual_key(self, key): """Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``.""" if key in self or key in ("RoW", "GLO"): return key elif (self.default_namespace, key) in self: return (self.default_namespace, key) if isinstance(key, str) and self.coco: new = coco.convert(names=[key], to='ISO2', not_found=None) if new in self: if new not in self.__seen: self.__seen.add(key) print("Geomatcher: Used '{}' for '{}'".format(new, key)) return new raise KeyError("Can't find this location")
[ "def", "_actual_key", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", "or", "key", "in", "(", "\"RoW\"", ",", "\"GLO\"", ")", ":", "return", "key", "elif", "(", "self", ".", "default_namespace", ",", "key", ")", "in", "self", ":", "return", "(", "self", ".", "default_namespace", ",", "key", ")", "if", "isinstance", "(", "key", ",", "str", ")", "and", "self", ".", "coco", ":", "new", "=", "coco", ".", "convert", "(", "names", "=", "[", "key", "]", ",", "to", "=", "'ISO2'", ",", "not_found", "=", "None", ")", "if", "new", "in", "self", ":", "if", "new", "not", "in", "self", ".", "__seen", ":", "self", ".", "__seen", ".", "add", "(", "key", ")", "print", "(", "\"Geomatcher: Used '{}' for '{}'\"", ".", "format", "(", "new", ",", "key", ")", ")", "return", "new", "raise", "KeyError", "(", "\"Can't find this location\"", ")" ]
Translate provided key into the key used in the topology. Tries the unmodified key, the key with the default namespace, and the country converter. Raises a ``KeyError`` if none of these finds a suitable definition in ``self.topology``.
[ "Translate", "provided", "key", "into", "the", "key", "used", "in", "the", "topology", ".", "Tries", "the", "unmodified", "key", "the", "key", "with", "the", "default", "namespace", "and", "the", "country", "converter", ".", "Raises", "a", "KeyError", "if", "none", "of", "these", "finds", "a", "suitable", "definition", "in", "self", ".", "topology", "." ]
d38d7e8d5bf943a6499f3000004f1953af5970de
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L87-L102
245,057
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
Geomatcher._finish_filter
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first): """Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.""" key = self._actual_key(key) locations = [x[0] for x in lst] if not include_self and key in locations: lst.pop(locations.index(key)) lst.sort(key=lambda x: x[1], reverse=biggest_first) lst = [x for x, y in lst] # RoW in both key and lst, but not defined; only RoW remains if exclusive if key == 'RoW' and 'RoW' not in self and exclusive: return ['RoW'] if 'RoW' in lst else [] elif exclusive: removed, remaining = set(), [] while lst: current = lst.pop(0) faces = self[current] if not faces.intersection(removed): removed.update(faces) remaining.append(current) lst = remaining # If RoW not resolved, make it the smallest if 'RoW' not in self and 'RoW' in lst: lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW')) return lst
python
def _finish_filter(self, lst, key, include_self, exclusive, biggest_first): """Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.""" key = self._actual_key(key) locations = [x[0] for x in lst] if not include_self and key in locations: lst.pop(locations.index(key)) lst.sort(key=lambda x: x[1], reverse=biggest_first) lst = [x for x, y in lst] # RoW in both key and lst, but not defined; only RoW remains if exclusive if key == 'RoW' and 'RoW' not in self and exclusive: return ['RoW'] if 'RoW' in lst else [] elif exclusive: removed, remaining = set(), [] while lst: current = lst.pop(0) faces = self[current] if not faces.intersection(removed): removed.update(faces) remaining.append(current) lst = remaining # If RoW not resolved, make it the smallest if 'RoW' not in self and 'RoW' in lst: lst[-1 if biggest_first else 0] = lst.pop(lst.index('RoW')) return lst
[ "def", "_finish_filter", "(", "self", ",", "lst", ",", "key", ",", "include_self", ",", "exclusive", ",", "biggest_first", ")", ":", "key", "=", "self", ".", "_actual_key", "(", "key", ")", "locations", "=", "[", "x", "[", "0", "]", "for", "x", "in", "lst", "]", "if", "not", "include_self", "and", "key", "in", "locations", ":", "lst", ".", "pop", "(", "locations", ".", "index", "(", "key", ")", ")", "lst", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "biggest_first", ")", "lst", "=", "[", "x", "for", "x", ",", "y", "in", "lst", "]", "# RoW in both key and lst, but not defined; only RoW remains if exclusive", "if", "key", "==", "'RoW'", "and", "'RoW'", "not", "in", "self", "and", "exclusive", ":", "return", "[", "'RoW'", "]", "if", "'RoW'", "in", "lst", "else", "[", "]", "elif", "exclusive", ":", "removed", ",", "remaining", "=", "set", "(", ")", ",", "[", "]", "while", "lst", ":", "current", "=", "lst", ".", "pop", "(", "0", ")", "faces", "=", "self", "[", "current", "]", "if", "not", "faces", ".", "intersection", "(", "removed", ")", ":", "removed", ".", "update", "(", "faces", ")", "remaining", ".", "append", "(", "current", ")", "lst", "=", "remaining", "# If RoW not resolved, make it the smallest", "if", "'RoW'", "not", "in", "self", "and", "'RoW'", "in", "lst", ":", "lst", "[", "-", "1", "if", "biggest_first", "else", "0", "]", "=", "lst", ".", "pop", "(", "lst", ".", "index", "(", "'RoW'", ")", ")", "return", "lst" ]
Finish filtering a GIS operation. Can optionally exclude the input key, sort results, and exclude overlapping results. Internal function, not normally called directly.
[ "Finish", "filtering", "a", "GIS", "operation", ".", "Can", "optionally", "exclude", "the", "input", "key", "sort", "results", "and", "exclude", "overlapping", "results", ".", "Internal", "function", "not", "normally", "called", "directly", "." ]
d38d7e8d5bf943a6499f3000004f1953af5970de
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L104-L132
245,058
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
Geomatcher.intersects
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
        """Get all locations that intersect this location.

        Note that sorting is done first by the number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.

        If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with ``RoW`` or nothing.

        """
        possibles = self.topology if only is None else {k: self[k] for k in only}

        if key == 'RoW' and 'RoW' not in self:
            return ['RoW'] if 'RoW' in possibles else []

        faces = self[key]
        lst = [
            (k, (len(v.intersection(faces)), len(v)))
            for k, v in possibles.items()
            if (faces.intersection(v))
        ]
        return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
python
def intersects(self, key, include_self=False, exclusive=False, biggest_first=True, only=None):
        """Get all locations that intersect this location.

        Note that sorting is done first by the number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.

        If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with ``RoW`` or nothing.

        """
        possibles = self.topology if only is None else {k: self[k] for k in only}

        if key == 'RoW' and 'RoW' not in self:
            return ['RoW'] if 'RoW' in possibles else []

        faces = self[key]
        lst = [
            (k, (len(v.intersection(faces)), len(v)))
            for k, v in possibles.items()
            if (faces.intersection(v))
        ]
        return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
[ "def", "intersects", "(", "self", ",", "key", ",", "include_self", "=", "False", ",", "exclusive", "=", "False", ",", "biggest_first", "=", "True", ",", "only", "=", "None", ")", ":", "possibles", "=", "self", ".", "topology", "if", "only", "is", "None", "else", "{", "k", ":", "self", "[", "k", "]", "for", "k", "in", "only", "}", "if", "key", "==", "'RoW'", "and", "'RoW'", "not", "in", "self", ":", "return", "[", "'RoW'", "]", "if", "'RoW'", "in", "possibles", "else", "[", "]", "faces", "=", "self", "[", "key", "]", "lst", "=", "[", "(", "k", ",", "(", "len", "(", "v", ".", "intersection", "(", "faces", ")", ")", ",", "len", "(", "v", ")", ")", ")", "for", "k", ",", "v", "in", "possibles", ".", "items", "(", ")", "if", "(", "faces", ".", "intersection", "(", "v", ")", ")", "]", "return", "self", ".", "_finish_filter", "(", "lst", ",", "key", ",", "include_self", ",", "exclusive", ",", "biggest_first", ")" ]
Get all locations that intersect this location.

        Note that sorting is done first by the number of faces intersecting ``key``; the total number of faces in the intersected region is only used to break sorting ties.

        If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition, and therefore nothing intersects it. ``.intersects("RoW")`` returns a list with ``RoW`` or nothing.
[ "Get", "all", "locations", "that", "intersect", "this", "location", "." ]
d38d7e8d5bf943a6499f3000004f1953af5970de
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L134-L153
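The face-set test in ``intersects`` is easy to see without the real topology; a toy rerun of the same logic with invented locations and integer face ids:

topology = {
    "continent": {1, 2, 3, 4},
    "country-a": {1, 2},
    "country-b": {3},
    "island":    {9},
}

key_faces = topology["country-a"]
hits = [
    (name, (len(faces & key_faces), len(faces)))
    for name, faces in topology.items()
    if faces & key_faces
]
# Sort the way _finish_filter does: biggest overlap first, total size breaks ties.
hits.sort(key=lambda item: item[1], reverse=True)
print([name for name, _ in hits])
# ['continent', 'country-a']; 'country-b' and 'island' share no face with the key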
245,059
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
Geomatcher.contained
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None): """Get all locations that are completely within this location. If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing. """ if 'RoW' not in self: if key == 'RoW': return ['RoW'] if 'RoW' in (only or []) else [] elif only and 'RoW' in only: only.pop(only.index('RoW')) possibles = self.topology if only is None else {k: self[k] for k in only} faces = self[key] lst = [ (k, len(v)) for k, v in possibles.items() if v and faces.issuperset(v) ] return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
python
def contained(self, key, include_self=True, exclusive=False, biggest_first=True, only=None): """Get all locations that are completely within this location. If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing. """ if 'RoW' not in self: if key == 'RoW': return ['RoW'] if 'RoW' in (only or []) else [] elif only and 'RoW' in only: only.pop(only.index('RoW')) possibles = self.topology if only is None else {k: self[k] for k in only} faces = self[key] lst = [ (k, len(v)) for k, v in possibles.items() if v and faces.issuperset(v) ] return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
[ "def", "contained", "(", "self", ",", "key", ",", "include_self", "=", "True", ",", "exclusive", "=", "False", ",", "biggest_first", "=", "True", ",", "only", "=", "None", ")", ":", "if", "'RoW'", "not", "in", "self", ":", "if", "key", "==", "'RoW'", ":", "return", "[", "'RoW'", "]", "if", "'RoW'", "in", "(", "only", "or", "[", "]", ")", "else", "[", "]", "elif", "only", "and", "'RoW'", "in", "only", ":", "only", ".", "pop", "(", "only", ".", "index", "(", "'RoW'", ")", ")", "possibles", "=", "self", ".", "topology", "if", "only", "is", "None", "else", "{", "k", ":", "self", "[", "k", "]", "for", "k", "in", "only", "}", "faces", "=", "self", "[", "key", "]", "lst", "=", "[", "(", "k", ",", "len", "(", "v", ")", ")", "for", "k", ",", "v", "in", "possibles", ".", "items", "(", ")", "if", "v", "and", "faces", ".", "issuperset", "(", "v", ")", "]", "return", "self", ".", "_finish_filter", "(", "lst", ",", "key", ",", "include_self", ",", "exclusive", ",", "biggest_first", ")" ]
Get all locations that are completely within this location. If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``.contained("RoW")`` returns a list with either ``RoW`` or nothing.
[ "Get", "all", "locations", "that", "are", "completely", "within", "this", "location", "." ]
d38d7e8d5bf943a6499f3000004f1953af5970de
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L155-L175
245,060
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
Geomatcher.within
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None): """Get all locations that completely contain this location. If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``. """ possibles = self.topology if only is None else {k: self[k] for k in only} _ = lambda key: [key] if key in possibles else [] if 'RoW' not in self and key == 'RoW': answer = [] + _('RoW') + _('GLO') return list(reversed(answer)) if biggest_first else answer faces = self[key] lst = [ (k, len(v)) for k, v in possibles.items() if faces.issubset(v) ] return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
python
def within(self, key, include_self=True, exclusive=False, biggest_first=True, only=None): """Get all locations that completely contain this location. If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``. """ possibles = self.topology if only is None else {k: self[k] for k in only} _ = lambda key: [key] if key in possibles else [] if 'RoW' not in self and key == 'RoW': answer = [] + _('RoW') + _('GLO') return list(reversed(answer)) if biggest_first else answer faces = self[key] lst = [ (k, len(v)) for k, v in possibles.items() if faces.issubset(v) ] return self._finish_filter(lst, key, include_self, exclusive, biggest_first)
[ "def", "within", "(", "self", ",", "key", ",", "include_self", "=", "True", ",", "exclusive", "=", "False", ",", "biggest_first", "=", "True", ",", "only", "=", "None", ")", ":", "possibles", "=", "self", ".", "topology", "if", "only", "is", "None", "else", "{", "k", ":", "self", "[", "k", "]", "for", "k", "in", "only", "}", "_", "=", "lambda", "key", ":", "[", "key", "]", "if", "key", "in", "possibles", "else", "[", "]", "if", "'RoW'", "not", "in", "self", "and", "key", "==", "'RoW'", ":", "answer", "=", "[", "]", "+", "_", "(", "'RoW'", ")", "+", "_", "(", "'GLO'", ")", "return", "list", "(", "reversed", "(", "answer", ")", ")", "if", "biggest_first", "else", "answer", "faces", "=", "self", "[", "key", "]", "lst", "=", "[", "(", "k", ",", "len", "(", "v", ")", ")", "for", "k", ",", "v", "in", "possibles", ".", "items", "(", ")", "if", "faces", ".", "issubset", "(", "v", ")", "]", "return", "self", ".", "_finish_filter", "(", "lst", ",", "key", ",", "include_self", ",", "exclusive", ",", "biggest_first", ")" ]
Get all locations that completely contain this location. If the ``resolved_row`` context manager is not used, ``RoW`` doesn't have a spatial definition. Therefore, ``RoW`` can only be contained by ``GLO`` and ``RoW``.
[ "Get", "all", "locations", "that", "completely", "contain", "this", "location", "." ]
d38d7e8d5bf943a6499f3000004f1953af5970de
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L177-L195
245,061
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
Geomatcher.split_face
def split_face(self, face, number=None, ids=None): """Split a topological face into a number of small faces. * ``face``: The face to split. Must be in the topology. * ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces. * ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored. Returns the new face ids. """ assert face in self.faces if ids: ids = set(ids) else: max_int = max(x for x in self.faces if isinstance(x, int)) ids = set(range(max_int + 1, max_int + 1 + (number or 2))) for obj in self.topology.values(): if face in obj: obj.discard(face) obj.update(ids) self.faces.discard(face) self.faces.update(ids) return ids
python
def split_face(self, face, number=None, ids=None): """Split a topological face into a number of small faces. * ``face``: The face to split. Must be in the topology. * ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces. * ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored. Returns the new face ids. """ assert face in self.faces if ids: ids = set(ids) else: max_int = max(x for x in self.faces if isinstance(x, int)) ids = set(range(max_int + 1, max_int + 1 + (number or 2))) for obj in self.topology.values(): if face in obj: obj.discard(face) obj.update(ids) self.faces.discard(face) self.faces.update(ids) return ids
[ "def", "split_face", "(", "self", ",", "face", ",", "number", "=", "None", ",", "ids", "=", "None", ")", ":", "assert", "face", "in", "self", ".", "faces", "if", "ids", ":", "ids", "=", "set", "(", "ids", ")", "else", ":", "max_int", "=", "max", "(", "x", "for", "x", "in", "self", ".", "faces", "if", "isinstance", "(", "x", ",", "int", ")", ")", "ids", "=", "set", "(", "range", "(", "max_int", "+", "1", ",", "max_int", "+", "1", "+", "(", "number", "or", "2", ")", ")", ")", "for", "obj", "in", "self", ".", "topology", ".", "values", "(", ")", ":", "if", "face", "in", "obj", ":", "obj", ".", "discard", "(", "face", ")", "obj", ".", "update", "(", "ids", ")", "self", ".", "faces", ".", "discard", "(", "face", ")", "self", ".", "faces", ".", "update", "(", "ids", ")", "return", "ids" ]
Split a topological face into a number of small faces. * ``face``: The face to split. Must be in the topology. * ``number``: Number of new faces to create. Optional, can be inferred from ``ids``. Default is 2 new faces. * ``ids``: Iterable of new face ids. Optional, default is the maximum integer in the existing topology plus one. ``ids`` don't have to be integers. If ``ids`` is specified, ``number`` is ignored. Returns the new face ids.
[ "Split", "a", "topological", "face", "into", "a", "number", "of", "small", "faces", "." ]
d38d7e8d5bf943a6499f3000004f1953af5970de
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L197-L223
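A small standalone walk-through of the bookkeeping in the record, using a toy topology (names and face ids invented) instead of the instance attributes:

faces = {1, 2, 3}
topology = {"A": {1, 2}, "B": {2, 3}}

def split_face(face, number=2):
    assert face in faces
    # New ids start just past the largest existing integer face id.
    max_int = max(x for x in faces if isinstance(x, int))
    ids = set(range(max_int + 1, max_int + 1 + number))
    # Every location that contained the old face now contains the new ones.
    for members in topology.values():
        if face in members:
            members.discard(face)
            members.update(ids)
    faces.discard(face)
    faces.update(ids)
    return ids

print(split_face(2))   # {4, 5}
print(topology)        # {'A': {1, 4, 5}, 'B': {3, 4, 5}}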
245,062
cmutel/constructive_geometries
constructive_geometries/geomatcher.py
Geomatcher.add_definitions
def add_definitions(self, data, namespace, relative=True): """Add new topological definitions to ``self.topology``. If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE: .. code-block:: python {"Russia Region": [ "AM", "AZ", "GE", "RU" ]} Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets: .. code-block:: python { 'A': {1, 2, 3}, 'B': {2, 3, 4}, } """ if not relative: self.topology.update({(namespace, k): v for k, v in data.items()}) self.faces.update(set.union(*data.values())) else: self.topology.update({ (namespace, k): set.union(*[self[o] for o in v]) for k, v in data.items() })
python
def add_definitions(self, data, namespace, relative=True): """Add new topological definitions to ``self.topology``. If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE: .. code-block:: python {"Russia Region": [ "AM", "AZ", "GE", "RU" ]} Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets: .. code-block:: python { 'A': {1, 2, 3}, 'B': {2, 3, 4}, } """ if not relative: self.topology.update({(namespace, k): v for k, v in data.items()}) self.faces.update(set.union(*data.values())) else: self.topology.update({ (namespace, k): set.union(*[self[o] for o in v]) for k, v in data.items() })
[ "def", "add_definitions", "(", "self", ",", "data", ",", "namespace", ",", "relative", "=", "True", ")", ":", "if", "not", "relative", ":", "self", ".", "topology", ".", "update", "(", "{", "(", "namespace", ",", "k", ")", ":", "v", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", "}", ")", "self", ".", "faces", ".", "update", "(", "set", ".", "union", "(", "*", "data", ".", "values", "(", ")", ")", ")", "else", ":", "self", ".", "topology", ".", "update", "(", "{", "(", "namespace", ",", "k", ")", ":", "set", ".", "union", "(", "*", "[", "self", "[", "o", "]", "for", "o", "in", "v", "]", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", "}", ")" ]
Add new topological definitions to ``self.topology``. If ``relative`` is true, then ``data`` is defined relative to the existing locations already in ``self.topology``, e.g. IMAGE: .. code-block:: python {"Russia Region": [ "AM", "AZ", "GE", "RU" ]} Otherwise, ``data`` is a dictionary with string keys and values of integer topology face id sets: .. code-block:: python { 'A': {1, 2, 3}, 'B': {2, 3, 4}, }
[ "Add", "new", "topological", "definitions", "to", "self", ".", "topology", "." ]
d38d7e8d5bf943a6499f3000004f1953af5970de
https://github.com/cmutel/constructive_geometries/blob/d38d7e8d5bf943a6499f3000004f1953af5970de/constructive_geometries/geomatcher.py#L225-L256
245,063
maxfischer2781/include
include/mount/__init__.py
MountLoader.is_module
def is_module(self, name): """Test that `name` is a module name""" if self.module_prefix.startswith(self.mount_prefix): return name.startswith(self.module_prefix) return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)
python
def is_module(self, name): """Test that `name` is a module name""" if self.module_prefix.startswith(self.mount_prefix): return name.startswith(self.module_prefix) return name.startswith(self.module_prefix) and not name.startswith(self.mount_prefix)
[ "def", "is_module", "(", "self", ",", "name", ")", ":", "if", "self", ".", "module_prefix", ".", "startswith", "(", "self", ".", "mount_prefix", ")", ":", "return", "name", ".", "startswith", "(", "self", ".", "module_prefix", ")", "return", "name", ".", "startswith", "(", "self", ".", "module_prefix", ")", "and", "not", "name", ".", "startswith", "(", "self", ".", "mount_prefix", ")" ]
Test that `name` is a module name
[ "Test", "that", "name", "is", "a", "module", "name" ]
d8b0404f4996b6abcd39fdebf282b31fad8bb6f5
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L26-L30
245,064
maxfischer2781/include
include/mount/__init__.py
MountLoader.is_mount
def is_mount(self, name): """Test that `name` is a mount name""" if self.mount_prefix.startswith(self.module_prefix): return name.startswith(self.mount_prefix) return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)
python
def is_mount(self, name): """Test that `name` is a mount name""" if self.mount_prefix.startswith(self.module_prefix): return name.startswith(self.mount_prefix) return name.startswith(self.mount_prefix) and not name.startswith(self.module_prefix)
[ "def", "is_mount", "(", "self", ",", "name", ")", ":", "if", "self", ".", "mount_prefix", ".", "startswith", "(", "self", ".", "module_prefix", ")", ":", "return", "name", ".", "startswith", "(", "self", ".", "mount_prefix", ")", "return", "name", ".", "startswith", "(", "self", ".", "mount_prefix", ")", "and", "not", "name", ".", "startswith", "(", "self", ".", "module_prefix", ")" ]
Test that `name` is a mount name
[ "Test", "that", "name", "is", "a", "mount", "name" ]
d8b0404f4996b6abcd39fdebf282b31fad8bb6f5
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L32-L36
245,065
maxfischer2781/include
include/mount/__init__.py
MountLoader.name2mount
def name2mount(self, name): """Convert a module name to a mount name""" if not self.is_module(name): raise ValueError('%r is not a supported module name' % (name, )) return name.replace(self.module_prefix, self.mount_prefix)
python
def name2mount(self, name): """Convert a module name to a mount name""" if not self.is_module(name): raise ValueError('%r is not a supported module name' % (name, )) return name.replace(self.module_prefix, self.mount_prefix)
[ "def", "name2mount", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "is_module", "(", "name", ")", ":", "raise", "ValueError", "(", "'%r is not a supported module name'", "%", "(", "name", ",", ")", ")", "return", "name", ".", "replace", "(", "self", ".", "module_prefix", ",", "self", ".", "mount_prefix", ")" ]
Convert a module name to a mount name
[ "Convert", "a", "module", "name", "to", "a", "mount", "name" ]
d8b0404f4996b6abcd39fdebf282b31fad8bb6f5
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L38-L42
245,066
maxfischer2781/include
include/mount/__init__.py
MountLoader.mount2name
def mount2name(self, mount): """Convert a mount name to a module name""" if not self.is_mount(mount): raise ValueError('%r is not a supported mount name' % (mount,)) return mount.replace(self.mount_prefix, self.module_prefix)
python
def mount2name(self, mount): """Convert a mount name to a module name""" if not self.is_mount(mount): raise ValueError('%r is not a supported mount name' % (mount,)) return mount.replace(self.mount_prefix, self.module_prefix)
[ "def", "mount2name", "(", "self", ",", "mount", ")", ":", "if", "not", "self", ".", "is_mount", "(", "mount", ")", ":", "raise", "ValueError", "(", "'%r is not a supported mount name'", "%", "(", "mount", ",", ")", ")", "return", "mount", ".", "replace", "(", "self", ".", "mount_prefix", ",", "self", ".", "module_prefix", ")" ]
Convert a mount name to a module name
[ "Convert", "a", "mount", "name", "to", "a", "module", "name" ]
d8b0404f4996b6abcd39fdebf282b31fad8bb6f5
https://github.com/maxfischer2781/include/blob/d8b0404f4996b6abcd39fdebf282b31fad8bb6f5/include/mount/__init__.py#L44-L48
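The four ``MountLoader`` records above only make sense together; a compact sketch of the round-trip with concrete, invented prefixes (the guard clause mirrors the records' handling of one prefix containing the other):

module_prefix = "include.mount."
mount_prefix = "/srv/includes/"

def is_module(name):
    # A name qualifies as a module only if it carries the module prefix and
    # is not also a mount name (unless the prefixes overlap).
    if module_prefix.startswith(mount_prefix):
        return name.startswith(module_prefix)
    return name.startswith(module_prefix) and not name.startswith(mount_prefix)

def name2mount(name):
    if not is_module(name):
        raise ValueError('%r is not a supported module name' % (name,))
    return name.replace(module_prefix, mount_prefix)

print(name2mount("include.mount.config"))  # /srv/includes/config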
245,067
ppo/django-guitar
guitar/templatetags/guitar_tags.py
static_absolute_tag
def static_absolute_tag(context, path): """ Return the absolute URL of a static file. Usage: ``{% %}`` """ request = context.get("request") return urljoin(request.ABSOLUTE_ROOT, static_url(path))
python
def static_absolute_tag(context, path): """ Return the absolute URL of a static file. Usage: ``{% %}`` """ request = context.get("request") return urljoin(request.ABSOLUTE_ROOT, static_url(path))
[ "def", "static_absolute_tag", "(", "context", ",", "path", ")", ":", "request", "=", "context", ".", "get", "(", "\"request\"", ")", "return", "urljoin", "(", "request", ".", "ABSOLUTE_ROOT", ",", "static_url", "(", "path", ")", ")" ]
Return the absolute URL of a static file. Usage: ``{% %}``
[ "Return", "the", "absolute", "URL", "of", "a", "static", "file", "." ]
857282219c0c4ff5907c3ad04ef012281d245348
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/templatetags/guitar_tags.py#L91-L98
245,068
ppo/django-guitar
guitar/templatetags/guitar_tags.py
static_cdn_tag
def static_cdn_tag(path, cdn, cdn_only=False): """ Return the URL of a static file, with handling of offline mode. Usage: ``{% %}`` """ clean_path = path.lstrip("/") if getattr(settings, "OFFLINE", False): return static_url(join("vendor", clean_path)) elif cdn_only: return cdn return urljoin(cdn, clean_path)
python
def static_cdn_tag(path, cdn, cdn_only=False): """ Return the URL of a static file, with handling of offline mode. Usage: ``{% %}`` """ clean_path = path.lstrip("/") if getattr(settings, "OFFLINE", False): return static_url(join("vendor", clean_path)) elif cdn_only: return cdn return urljoin(cdn, clean_path)
[ "def", "static_cdn_tag", "(", "path", ",", "cdn", ",", "cdn_only", "=", "False", ")", ":", "clean_path", "=", "path", ".", "lstrip", "(", "\"/\"", ")", "if", "getattr", "(", "settings", ",", "\"OFFLINE\"", ",", "False", ")", ":", "return", "static_url", "(", "join", "(", "\"vendor\"", ",", "clean_path", ")", ")", "elif", "cdn_only", ":", "return", "cdn", "return", "urljoin", "(", "cdn", ",", "clean_path", ")" ]
Return the URL of a static file, with handling of offline mode. Usage: ``{% %}``
[ "Return", "the", "URL", "of", "a", "static", "file", "with", "handling", "of", "offline", "mode", "." ]
857282219c0c4ff5907c3ad04ef012281d245348
https://github.com/ppo/django-guitar/blob/857282219c0c4ff5907c3ad04ef012281d245348/guitar/templatetags/guitar_tags.py#L102-L113
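The branching in ``static_cdn_tag`` depends on Django settings and ``static_url``; the same decision table, rewritten without Django (the ``/static/`` prefix stands in for ``static_url``, and the flag and URLs are invented):

from posixpath import join
from urllib.parse import urljoin

def cdn_url(path, cdn, cdn_only=False, offline=False):
    clean_path = path.lstrip("/")
    if offline:
        # Offline mode serves a vendored local copy instead of the CDN.
        return "/static/" + join("vendor", clean_path)
    if cdn_only:
        return cdn
    return urljoin(cdn, clean_path)

print(cdn_url("/jquery/3.1/jquery.min.js", "https://cdn.example.com/"))
# https://cdn.example.com/jquery/3.1/jquery.min.js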
245,069
sassoo/goldman
goldman/resources/models.py
on_get
def on_get(resc, req, resp): """ Get the models identified by query parameters We return an empty list if no models are found. """ signals.pre_req.send(resc.model) signals.pre_req_search.send(resc.model) models = goldman.sess.store.search(resc.rtype, **{ 'filters': req.filters, 'pages': req.pages, 'sorts': req.sorts, }) props = to_rest_models(models, includes=req.includes) resp.serialize(props) signals.post_req.send(resc.model) signals.post_req_search.send(resc.model)
python
def on_get(resc, req, resp): """ Get the models identified by query parameters We return an empty list if no models are found. """ signals.pre_req.send(resc.model) signals.pre_req_search.send(resc.model) models = goldman.sess.store.search(resc.rtype, **{ 'filters': req.filters, 'pages': req.pages, 'sorts': req.sorts, }) props = to_rest_models(models, includes=req.includes) resp.serialize(props) signals.post_req.send(resc.model) signals.post_req_search.send(resc.model)
[ "def", "on_get", "(", "resc", ",", "req", ",", "resp", ")", ":", "signals", ".", "pre_req", ".", "send", "(", "resc", ".", "model", ")", "signals", ".", "pre_req_search", ".", "send", "(", "resc", ".", "model", ")", "models", "=", "goldman", ".", "sess", ".", "store", ".", "search", "(", "resc", ".", "rtype", ",", "*", "*", "{", "'filters'", ":", "req", ".", "filters", ",", "'pages'", ":", "req", ".", "pages", ",", "'sorts'", ":", "req", ".", "sorts", ",", "}", ")", "props", "=", "to_rest_models", "(", "models", ",", "includes", "=", "req", ".", "includes", ")", "resp", ".", "serialize", "(", "props", ")", "signals", ".", "post_req", ".", "send", "(", "resc", ".", "model", ")", "signals", ".", "post_req_search", ".", "send", "(", "resc", ".", "model", ")" ]
Get the models identified by query parameters We return an empty list if no models are found.
[ "Get", "the", "models", "identified", "by", "query", "parameters" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/models.py#L20-L39
245,070
sassoo/goldman
goldman/resources/models.py
on_post
def on_post(resc, req, resp): """ Deserialize the payload & create the new single item """ signals.pre_req.send(resc.model) signals.pre_req_create.send(resc.model) props = req.deserialize() model = resc.model() from_rest(model, props) goldman.sess.store.create(model) props = to_rest_model(model, includes=req.includes) resp.last_modified = model.updated resp.location = '%s/%s' % (req.path, model.rid_value) resp.status = falcon.HTTP_201 resp.serialize(props) signals.post_req.send(resc.model) signals.post_req_create.send(resc.model)
python
def on_post(resc, req, resp): """ Deserialize the payload & create the new single item """ signals.pre_req.send(resc.model) signals.pre_req_create.send(resc.model) props = req.deserialize() model = resc.model() from_rest(model, props) goldman.sess.store.create(model) props = to_rest_model(model, includes=req.includes) resp.last_modified = model.updated resp.location = '%s/%s' % (req.path, model.rid_value) resp.status = falcon.HTTP_201 resp.serialize(props) signals.post_req.send(resc.model) signals.post_req_create.send(resc.model)
[ "def", "on_post", "(", "resc", ",", "req", ",", "resp", ")", ":", "signals", ".", "pre_req", ".", "send", "(", "resc", ".", "model", ")", "signals", ".", "pre_req_create", ".", "send", "(", "resc", ".", "model", ")", "props", "=", "req", ".", "deserialize", "(", ")", "model", "=", "resc", ".", "model", "(", ")", "from_rest", "(", "model", ",", "props", ")", "goldman", ".", "sess", ".", "store", ".", "create", "(", "model", ")", "props", "=", "to_rest_model", "(", "model", ",", "includes", "=", "req", ".", "includes", ")", "resp", ".", "last_modified", "=", "model", ".", "updated", "resp", ".", "location", "=", "'%s/%s'", "%", "(", "req", ".", "path", ",", "model", ".", "rid_value", ")", "resp", ".", "status", "=", "falcon", ".", "HTTP_201", "resp", ".", "serialize", "(", "props", ")", "signals", ".", "post_req", ".", "send", "(", "resc", ".", "model", ")", "signals", ".", "post_req_create", ".", "send", "(", "resc", ".", "model", ")" ]
Deserialize the payload & create the new single item
[ "Deserialize", "the", "payload", "&", "create", "the", "new", "single", "item" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/models.py#L42-L61
245,071
pip-services3-python/pip-services3-components-python
pip_services3_components/auth/MemoryCredentialStore.py
MemoryCredentialStore.read_credentials
def read_credentials(self, credentials): """ Reads credentials from configuration parameters. Each section represents an individual CredentialParams :param credentials: configuration parameters to be read """ self._items.clear() for key in credentials.get_key_names(): value = credentials.get_as_nullable_string(key) self._items.append(CredentialParams.from_tuples([key, value]))
python
def read_credentials(self, credentials): """ Reads credentials from configuration parameters. Each section represents an individual CredentialParams :param credentials: configuration parameters to be read """ self._items.clear() for key in credentials.get_key_names(): value = credentials.get_as_nullable_string(key) self._items.append(CredentialParams.from_tuples([key, value]))
[ "def", "read_credentials", "(", "self", ",", "credentials", ")", ":", "self", ".", "_items", ".", "clear", "(", ")", "for", "key", "in", "credentials", ".", "get_key_names", "(", ")", ":", "value", "=", "credentials", ".", "get_as_nullable_string", "(", "key", ")", "self", ".", "_items", ".", "append", "(", "CredentialParams", ".", "from_tuples", "(", "[", "key", ",", "value", "]", ")", ")" ]
Reads credentials from configuration parameters. Each section represents an individual CredentialParams :param credentials: configuration parameters to be read
[ "Reads", "credentials", "from", "configuration", "parameters", ".", "Each", "section", "represents", "an", "individual", "CredentialParams" ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/auth/MemoryCredentialStore.py#L60-L70
245,072
pip-services3-python/pip-services3-components-python
pip_services3_components/auth/MemoryCredentialStore.py
MemoryCredentialStore.store
def store(self, correlation_id, key, credential): """ Stores credential parameters into the store. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the credential parameters. :param credential: a credential parameters to be stored. """ if credential != None: self._items.put(key, credential) else: self._items.remove(key)
python
def store(self, correlation_id, key, credential): """ Stores credential parameters into the store. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the credential parameters. :param credential: a credential parameters to be stored. """ if credential != None: self._items.put(key, credential) else: self._items.remove(key)
[ "def", "store", "(", "self", ",", "correlation_id", ",", "key", ",", "credential", ")", ":", "if", "credential", "!=", "None", ":", "self", ".", "_items", ".", "put", "(", "key", ",", "credential", ")", "else", ":", "self", ".", "_items", ".", "remove", "(", "key", ")" ]
Stores credential parameters into the store. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the credential parameters. :param credential: a credential parameters to be stored.
[ "Stores", "credential", "parameters", "into", "the", "store", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/auth/MemoryCredentialStore.py#L72-L85
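A small nit on the record above: ``credential != None`` works, but ``is not None`` is the idiomatic identity test. Note also that ``_items`` is used map-style here (``put``/``remove``) and list-style in ``read_credentials`` (``append``/``clear``), so the container is presumably a pip-services map type exposing both APIs. A dict-based sketch of the store semantics:

def store(items, key, credential):
    # Storing None removes the entry, mirroring the record's else branch.
    if credential is not None:
        items[key] = credential
    else:
        items.pop(key, None)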
245,073
rameshg87/pyremotevbox
pyremotevbox/ZSI/twisted/interfaces.py
CheckInputArgs
def CheckInputArgs(*interfaces): """Must provide at least one interface, the last one may be repeated. """ l = len(interfaces) def wrapper(func): def check_args(self, *args, **kw): for i in range(len(args)): if (l > i and interfaces[i].providedBy(args[i])) or interfaces[-1].providedBy(args[i]): continue if l > i: raise TypeError, 'arg %s does not implement %s' %(args[i], interfaces[i]) raise TypeError, 'arg %s does not implement %s' %(args[i], interfaces[-1]) func(self, *args, **kw) return check_args return wrapper
python
def CheckInputArgs(*interfaces): """Must provide at least one interface, the last one may be repeated. """ l = len(interfaces) def wrapper(func): def check_args(self, *args, **kw): for i in range(len(args)): if (l > i and interfaces[i].providedBy(args[i])) or interfaces[-1].providedBy(args[i]): continue if l > i: raise TypeError, 'arg %s does not implement %s' %(args[i], interfaces[i]) raise TypeError, 'arg %s does not implement %s' %(args[i], interfaces[-1]) func(self, *args, **kw) return check_args return wrapper
[ "def", "CheckInputArgs", "(", "*", "interfaces", ")", ":", "l", "=", "len", "(", "interfaces", ")", "def", "wrapper", "(", "func", ")", ":", "def", "check_args", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "for", "i", "in", "range", "(", "len", "(", "args", ")", ")", ":", "if", "(", "l", ">", "i", "and", "interfaces", "[", "i", "]", ".", "providedBy", "(", "args", "[", "i", "]", ")", ")", "or", "interfaces", "[", "-", "1", "]", ".", "providedBy", "(", "args", "[", "i", "]", ")", ":", "continue", "if", "l", ">", "i", ":", "raise", "TypeError", ",", "'arg %s does not implement %s'", "%", "(", "args", "[", "i", "]", ",", "interfaces", "[", "i", "]", ")", "raise", "TypeError", ",", "'arg %s does not implement %s'", "%", "(", "args", "[", "i", "]", ",", "interfaces", "[", "-", "1", "]", ")", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", "return", "check_args", "return", "wrapper" ]
Must provide at least one interface, the last one may be repeated.
[ "Must", "provide", "at", "least", "one", "interface", "the", "last", "one", "may", "be", "repeated", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/twisted/interfaces.py#L20-L33
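Note that the recorded code uses the Python 2 statement form raise TypeError, '...', so it will not parse under Python 3. Below is a rough Python 3 analogue of the same decorator pattern; it substitutes isinstance checks for zope-style providedBy so the sketch stays self-contained, which makes it an illustration of the pattern rather than the original interface-based behavior.

def check_input_types(*types):
    # require at least one type; the last one also matches any extra positional args,
    # mirroring the "last interface may be repeated" rule of the original
    n = len(types)
    def wrapper(func):
        def check_args(self, *args, **kw):
            for i, arg in enumerate(args):
                ok = (n > i and isinstance(arg, types[i])) or isinstance(arg, types[-1])
                if not ok:
                    expected = types[i] if n > i else types[-1]
                    raise TypeError('arg %r does not implement %s' % (arg, expected))
            return func(self, *args, **kw)
        return check_args
    return wrapper

class Greeter:
    @check_input_types(str)
    def greet(self, name):
        return 'hello ' + name

print(Greeter().greet('world'))   # hello world
# Greeter().greet(42)             # would raise TypeError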
245,074
alexmojaki/littleutils
littleutils/__init__.py
group_by_key_func
def group_by_key_func(iterable, key_func): """ Create a dictionary from an iterable such that the keys are the result of evaluating a key function on elements of the iterable and the values are lists of elements all of which correspond to the key. >>> def si(d): return sorted(d.items()) >>> si(group_by_key_func("a bb ccc d ee fff".split(), len)) [(1, ['a', 'd']), (2, ['bb', 'ee']), (3, ['ccc', 'fff'])] >>> si(group_by_key_func([-1, 0, 1, 3, 6, 8, 9, 2], lambda x: x % 2)) [(0, [0, 6, 8, 2]), (1, [-1, 1, 3, 9])] """ result = defaultdict(list) for item in iterable: result[key_func(item)].append(item) return result
python
def group_by_key_func(iterable, key_func): """ Create a dictionary from an iterable such that the keys are the result of evaluating a key function on elements of the iterable and the values are lists of elements all of which correspond to the key. >>> def si(d): return sorted(d.items()) >>> si(group_by_key_func("a bb ccc d ee fff".split(), len)) [(1, ['a', 'd']), (2, ['bb', 'ee']), (3, ['ccc', 'fff'])] >>> si(group_by_key_func([-1, 0, 1, 3, 6, 8, 9, 2], lambda x: x % 2)) [(0, [0, 6, 8, 2]), (1, [-1, 1, 3, 9])] """ result = defaultdict(list) for item in iterable: result[key_func(item)].append(item) return result
[ "def", "group_by_key_func", "(", "iterable", ",", "key_func", ")", ":", "result", "=", "defaultdict", "(", "list", ")", "for", "item", "in", "iterable", ":", "result", "[", "key_func", "(", "item", ")", "]", ".", "append", "(", "item", ")", "return", "result" ]
Create a dictionary from an iterable such that the keys are the result of evaluating a key function on elements of the iterable and the values are lists of elements all of which correspond to the key. >>> def si(d): return sorted(d.items()) >>> si(group_by_key_func("a bb ccc d ee fff".split(), len)) [(1, ['a', 'd']), (2, ['bb', 'ee']), (3, ['ccc', 'fff'])] >>> si(group_by_key_func([-1, 0, 1, 3, 6, 8, 9, 2], lambda x: x % 2)) [(0, [0, 6, 8, 2]), (1, [-1, 1, 3, 9])]
[ "Create", "a", "dictionary", "from", "an", "iterable", "such", "that", "the", "keys", "are", "the", "result", "of", "evaluating", "a", "key", "function", "on", "elements", "of", "the", "iterable", "and", "the", "values", "are", "lists", "of", "elements", "all", "of", "which", "correspond", "to", "the", "key", "." ]
1132d2d2782b05741a907d1281cd8c001f1d1d9d
https://github.com/alexmojaki/littleutils/blob/1132d2d2782b05741a907d1281cd8c001f1d1d9d/littleutils/__init__.py#L437-L451
245,075
radjkarl/fancyTools
fancytools/utils/formatedTime.py
formatedTime
def formatedTime(ms): """ convert milliseconds in a human readable time >>> formatedTime(60e3) '1m' >>> formatedTime(1000e3) '16m 40s' >>> formatedTime(200000123) '2d 7h 33m 20.123s' """ if ms: s = ms / 1000.0 m, s = divmod(s, 60) h, m = divmod(m, 60) d, h = divmod(h, 24) out = '' if d: out += '%gd ' % d if h: out += '%gh ' % h if m: out += '%gm ' % m if s: out += '%gs ' % s return out[:-1] return ''
python
def formatedTime(ms): """ convert milliseconds in a human readable time >>> formatedTime(60e3) '1m' >>> formatedTime(1000e3) '16m 40s' >>> formatedTime(200000123) '2d 7h 33m 20.123s' """ if ms: s = ms / 1000.0 m, s = divmod(s, 60) h, m = divmod(m, 60) d, h = divmod(h, 24) out = '' if d: out += '%gd ' % d if h: out += '%gh ' % h if m: out += '%gm ' % m if s: out += '%gs ' % s return out[:-1] return ''
[ "def", "formatedTime", "(", "ms", ")", ":", "if", "ms", ":", "s", "=", "ms", "/", "1000.0", "m", ",", "s", "=", "divmod", "(", "s", ",", "60", ")", "h", ",", "m", "=", "divmod", "(", "m", ",", "60", ")", "d", ",", "h", "=", "divmod", "(", "h", ",", "24", ")", "out", "=", "''", "if", "d", ":", "out", "+=", "'%gd '", "%", "d", "if", "h", ":", "out", "+=", "'%gh '", "%", "h", "if", "m", ":", "out", "+=", "'%gm '", "%", "m", "if", "s", ":", "out", "+=", "'%gs '", "%", "s", "return", "out", "[", ":", "-", "1", "]", "return", "''" ]
convert milliseconds in a human readable time >>> formatedTime(60e3) '1m' >>> formatedTime(1000e3) '16m 40s' >>> formatedTime(200000123) '2d 7h 33m 20.123s'
[ "convert", "milliseconds", "in", "a", "human", "readable", "time" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/utils/formatedTime.py#L5-L32
245,076
collectiveacuity/labPack
labpack/handlers/requests.py
requestsHandler._check_connectivity
def _check_connectivity(self, err): ''' a method to check connectivity as source of error ''' try: import requests requests.get(self.uptime_ssl) except: from requests import Request request_object = Request(method='GET', url=self.uptime_ssl) request_details = self.handle_requests(request_object) self.printer('ERROR.') raise ConnectionError(request_details['error']) self.printer('ERROR.') raise err
python
def _check_connectivity(self, err): ''' a method to check connectivity as source of error ''' try: import requests requests.get(self.uptime_ssl) except: from requests import Request request_object = Request(method='GET', url=self.uptime_ssl) request_details = self.handle_requests(request_object) self.printer('ERROR.') raise ConnectionError(request_details['error']) self.printer('ERROR.') raise err
[ "def", "_check_connectivity", "(", "self", ",", "err", ")", ":", "try", ":", "import", "requests", "requests", ".", "get", "(", "self", ".", "uptime_ssl", ")", "except", ":", "from", "requests", "import", "Request", "request_object", "=", "Request", "(", "method", "=", "'GET'", ",", "url", "=", "self", ".", "uptime_ssl", ")", "request_details", "=", "self", ".", "handle_requests", "(", "request_object", ")", "self", ".", "printer", "(", "'ERROR.'", ")", "raise", "ConnectionError", "(", "request_details", "[", "'error'", "]", ")", "self", ".", "printer", "(", "'ERROR.'", ")", "raise", "err" ]
a method to check connectivity as source of error
[ "a", "method", "to", "check", "connectivity", "as", "source", "of", "error" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/handlers/requests.py#L148-L162
245,077
collectiveacuity/labPack
labpack/handlers/requests.py
requestsHandler._request
def _request(self, **kwargs): ''' a helper method for processing all request types ''' response = None error = '' code = 0 # send request from requests import request try: response = request(**kwargs) # handle response if self.handle_response: response, error, code = self.handle_response(response) else: code = response.status_code # handle errors except Exception as err: from requests import Request request_object = Request(**kwargs) try: request_details = self.handle_requests(request_object) error = request_details['error'] except: error = str(err) return response, error, code
python
def _request(self, **kwargs): ''' a helper method for processing all request types ''' response = None error = '' code = 0 # send request from requests import request try: response = request(**kwargs) # handle response if self.handle_response: response, error, code = self.handle_response(response) else: code = response.status_code # handle errors except Exception as err: from requests import Request request_object = Request(**kwargs) try: request_details = self.handle_requests(request_object) error = request_details['error'] except: error = str(err) return response, error, code
[ "def", "_request", "(", "self", ",", "*", "*", "kwargs", ")", ":", "response", "=", "None", "error", "=", "''", "code", "=", "0", "# send request\r", "from", "requests", "import", "request", "try", ":", "response", "=", "request", "(", "*", "*", "kwargs", ")", "# handle response\r", "if", "self", ".", "handle_response", ":", "response", ",", "error", ",", "code", "=", "self", ".", "handle_response", "(", "response", ")", "else", ":", "code", "=", "response", ".", "status_code", "# handle errors\r", "except", "Exception", "as", "err", ":", "from", "requests", "import", "Request", "request_object", "=", "Request", "(", "*", "*", "kwargs", ")", "try", ":", "request_details", "=", "self", ".", "handle_requests", "(", "request_object", ")", "error", "=", "request_details", "[", "'error'", "]", "except", ":", "error", "=", "str", "(", "err", ")", "return", "response", ",", "error", ",", "code" ]
a helper method for processing all request types
[ "a", "helper", "method", "for", "processing", "all", "request", "types" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/handlers/requests.py#L164-L191
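All of the verb-specific wrappers below funnel through this method and hand back the same (response, error, code) triple, so callers branch on the error string instead of wrapping every call in try/except. A hedged sketch of that consumption pattern (the handler construction here is hypothetical; the real constructor arguments may differ):

handler = requestsHandler()   # hypothetical construction
response, error, code = handler._get_request('https://example.com/api', params={'q': 'test'})
if error:
    print('request failed with status %s: %s' % (code, error))
else:
    print('status %s, %d bytes' % (code, len(response.content)))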
245,078
collectiveacuity/labPack
labpack/handlers/requests.py
requestsHandler._get_request
def _get_request(self, url, params=None, **kwargs): ''' a method to catch and report http get request connectivity errors ''' # construct request kwargs request_kwargs = { 'method': 'GET', 'url': url, 'params': params } for key, value in kwargs.items(): request_kwargs[key] = value # send request and handle response return self._request(**request_kwargs)
python
def _get_request(self, url, params=None, **kwargs): ''' a method to catch and report http get request connectivity errors ''' # construct request kwargs request_kwargs = { 'method': 'GET', 'url': url, 'params': params } for key, value in kwargs.items(): request_kwargs[key] = value # send request and handle response return self._request(**request_kwargs)
[ "def", "_get_request", "(", "self", ",", "url", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# construct request kwargs\r", "request_kwargs", "=", "{", "'method'", ":", "'GET'", ",", "'url'", ":", "url", ",", "'params'", ":", "params", "}", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "request_kwargs", "[", "key", "]", "=", "value", "# send request and handle response\r", "return", "self", ".", "_request", "(", "*", "*", "request_kwargs", ")" ]
a method to catch and report http get request connectivity errors
[ "a", "method", "to", "catch", "and", "report", "http", "get", "request", "connectivity", "errors" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/handlers/requests.py#L193-L207
245,079
collectiveacuity/labPack
labpack/handlers/requests.py
requestsHandler._post_request
def _post_request(self, url, data=None, json=None, **kwargs): ''' a method to catch and report http post request connectivity errors ''' # construct request kwargs request_kwargs = { 'method': 'POST', 'url': url, 'data': data, 'json': json } for key, value in kwargs.items(): request_kwargs[key] = value # send request and handle response return self._request(**request_kwargs)
python
def _post_request(self, url, data=None, json=None, **kwargs): ''' a method to catch and report http post request connectivity errors ''' # construct request kwargs request_kwargs = { 'method': 'POST', 'url': url, 'data': data, 'json': json } for key, value in kwargs.items(): request_kwargs[key] = value # send request and handle response return self._request(**request_kwargs)
[ "def", "_post_request", "(", "self", ",", "url", ",", "data", "=", "None", ",", "json", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# construct request kwargs\r", "request_kwargs", "=", "{", "'method'", ":", "'POST'", ",", "'url'", ":", "url", ",", "'data'", ":", "data", ",", "'json'", ":", "json", "}", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "request_kwargs", "[", "key", "]", "=", "value", "# send request and handle response\r", "return", "self", ".", "_request", "(", "*", "*", "request_kwargs", ")" ]
a method to catch and report http post request connectivity errors
[ "a", "method", "to", "catch", "and", "report", "http", "post", "request", "connectivity", "errors" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/handlers/requests.py#L209-L224
245,080
collectiveacuity/labPack
labpack/handlers/requests.py
requestsHandler._options_request
def _options_request(self, url, **kwargs): ''' a method to catch and report http options request connectivity errors ''' # construct request kwargs request_kwargs = { 'method': 'OPTIONS', 'url': url } for key, value in kwargs.items(): request_kwargs[key] = value # send request and handle response return self._request(**request_kwargs)
python
def _options_request(self, url, **kwargs): ''' a method to catch and report http options request connectivity errors ''' # construct request kwargs request_kwargs = { 'method': 'OPTIONS', 'url': url } for key, value in kwargs.items(): request_kwargs[key] = value # send request and handle response return self._request(**request_kwargs)
[ "def", "_options_request", "(", "self", ",", "url", ",", "*", "*", "kwargs", ")", ":", "# construct request kwargs\r", "request_kwargs", "=", "{", "'method'", ":", "'OPTIONS'", ",", "'url'", ":", "url", "}", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "request_kwargs", "[", "key", "]", "=", "value", "# send request and handle response\r", "return", "self", ".", "_request", "(", "*", "*", "request_kwargs", ")" ]
a method to catch and report http options request connectivity errors
[ "a", "method", "to", "catch", "and", "report", "http", "options", "request", "connectivity", "errors" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/handlers/requests.py#L275-L288
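The GET, POST, and OPTIONS wrappers above all repeat the same build-kwargs-then-delegate shape. A hedged sketch of how that duplication could be folded into a single generic helper (the name verb_request is illustrative, not part of the library):

def verb_request(handler, method, url, **kwargs):
    # build the base kwargs for the verb, merge caller overrides, then delegate
    request_kwargs = {'method': method, 'url': url}
    request_kwargs.update(kwargs)
    return handler._request(**request_kwargs)

# e.g. handler._get_request(url, params=p) becomes verb_request(handler, 'GET', url, params=p)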
245,081
dossier/dossier.models
dossier/models/soft_selectors.py
find_soft_selectors
def find_soft_selectors(ids_and_clean_visible, start_num_tokens='10', max_num_tokens='20', filter_punctuation='0'): '''External interface for dossier.models.soft_selectors. This at scans through `num_tokens` values between `start_num_tokens` and `max_num_tokens` and calls `find_soft_selectors_at_n` looking for results All of the params can be passed from URL parameters, in which case they can be strings and this function will type cast them appropriately. ''' start_num_tokens = int(start_num_tokens) max_num_tokens = int(max_num_tokens) filter_punctuation = bool(int(filter_punctuation)) if not ids_and_clean_visible: logger.info('find_soft_selectors called with no ids_and_clean_visible') return [] current_results = [] ## results from current n previous_results = [] ## previous results from last n overall_results = [] ## overall results to return for num_tokens in range(start_num_tokens, max_num_tokens + 1): ## update this here previous_results = current_results results_at_n = find_soft_selectors_at_n( ids_and_clean_visible, num_tokens, filter_punctuation) if len(results_at_n) == 0: break best_score = results_at_n[0]['score'] ## i.e. the initial condition is they all have the same score idx_at_second = len(results_at_n) for idx, result in enumerate(results_at_n): if result['score'] < best_score: idx_at_second = idx break current_results = results_at_n[0:idx_at_second] if num_tokens == 8: for r in results_at_n: logger.info('%s --- score: %d' % (r['phrase'], r['score'])) if previous_results == []: logger.info('Previous results are empty. Continuing.') continue ## now, the main idea is to figure out if any strings from previous ## are substrings of those from current ## (with the scores fixed at the max for that subphrase). ## when they stop being substrings ## then those are completed phrases and should be returned as a result for prev_result in previous_results: is_subbed_and_same_score = False for curr_result in current_results: if prev_result['phrase'] in curr_result['phrase'] and \ prev_result['score'] == curr_result['score'] : is_subbed_and_same_score = True break if not is_subbed_and_same_score: ## then it's a honest result prev_result['n'] = num_tokens - 1 overall_results.append(prev_result) if len(current_results) == 0: ## we got them all ## (we still had to collect the previous results) ## that's why this break comes after the previous for loop break ## also add results from current_results at final n for result in current_results: result['n'] = num_tokens overall_results.append(result) ## sort by score then by length overall_results.sort(key=itemgetter('score', 'n'), reverse=True) logger.info('OVERALL RESULTS: %d' % len(overall_results)) # for idx, result in enumerate(overall_results): # logger.info('%d. %s --- score: %f , n = %d, hits=%d' % # (idx, result['phrase'], result['score'], result['n'], len(result['hits'])) # ) return overall_results
python
def find_soft_selectors(ids_and_clean_visible, start_num_tokens='10', max_num_tokens='20', filter_punctuation='0'): '''External interface for dossier.models.soft_selectors. This at scans through `num_tokens` values between `start_num_tokens` and `max_num_tokens` and calls `find_soft_selectors_at_n` looking for results All of the params can be passed from URL parameters, in which case they can be strings and this function will type cast them appropriately. ''' start_num_tokens = int(start_num_tokens) max_num_tokens = int(max_num_tokens) filter_punctuation = bool(int(filter_punctuation)) if not ids_and_clean_visible: logger.info('find_soft_selectors called with no ids_and_clean_visible') return [] current_results = [] ## results from current n previous_results = [] ## previous results from last n overall_results = [] ## overall results to return for num_tokens in range(start_num_tokens, max_num_tokens + 1): ## update this here previous_results = current_results results_at_n = find_soft_selectors_at_n( ids_and_clean_visible, num_tokens, filter_punctuation) if len(results_at_n) == 0: break best_score = results_at_n[0]['score'] ## i.e. the initial condition is they all have the same score idx_at_second = len(results_at_n) for idx, result in enumerate(results_at_n): if result['score'] < best_score: idx_at_second = idx break current_results = results_at_n[0:idx_at_second] if num_tokens == 8: for r in results_at_n: logger.info('%s --- score: %d' % (r['phrase'], r['score'])) if previous_results == []: logger.info('Previous results are empty. Continuing.') continue ## now, the main idea is to figure out if any strings from previous ## are substrings of those from current ## (with the scores fixed at the max for that subphrase). ## when they stop being substrings ## then those are completed phrases and should be returned as a result for prev_result in previous_results: is_subbed_and_same_score = False for curr_result in current_results: if prev_result['phrase'] in curr_result['phrase'] and \ prev_result['score'] == curr_result['score'] : is_subbed_and_same_score = True break if not is_subbed_and_same_score: ## then it's a honest result prev_result['n'] = num_tokens - 1 overall_results.append(prev_result) if len(current_results) == 0: ## we got them all ## (we still had to collect the previous results) ## that's why this break comes after the previous for loop break ## also add results from current_results at final n for result in current_results: result['n'] = num_tokens overall_results.append(result) ## sort by score then by length overall_results.sort(key=itemgetter('score', 'n'), reverse=True) logger.info('OVERALL RESULTS: %d' % len(overall_results)) # for idx, result in enumerate(overall_results): # logger.info('%d. %s --- score: %f , n = %d, hits=%d' % # (idx, result['phrase'], result['score'], result['n'], len(result['hits'])) # ) return overall_results
[ "def", "find_soft_selectors", "(", "ids_and_clean_visible", ",", "start_num_tokens", "=", "'10'", ",", "max_num_tokens", "=", "'20'", ",", "filter_punctuation", "=", "'0'", ")", ":", "start_num_tokens", "=", "int", "(", "start_num_tokens", ")", "max_num_tokens", "=", "int", "(", "max_num_tokens", ")", "filter_punctuation", "=", "bool", "(", "int", "(", "filter_punctuation", ")", ")", "if", "not", "ids_and_clean_visible", ":", "logger", ".", "info", "(", "'find_soft_selectors called with no ids_and_clean_visible'", ")", "return", "[", "]", "current_results", "=", "[", "]", "## results from current n", "previous_results", "=", "[", "]", "## previous results from last n", "overall_results", "=", "[", "]", "## overall results to return", "for", "num_tokens", "in", "range", "(", "start_num_tokens", ",", "max_num_tokens", "+", "1", ")", ":", "## update this here", "previous_results", "=", "current_results", "results_at_n", "=", "find_soft_selectors_at_n", "(", "ids_and_clean_visible", ",", "num_tokens", ",", "filter_punctuation", ")", "if", "len", "(", "results_at_n", ")", "==", "0", ":", "break", "best_score", "=", "results_at_n", "[", "0", "]", "[", "'score'", "]", "## i.e. the initial condition is they all have the same score", "idx_at_second", "=", "len", "(", "results_at_n", ")", "for", "idx", ",", "result", "in", "enumerate", "(", "results_at_n", ")", ":", "if", "result", "[", "'score'", "]", "<", "best_score", ":", "idx_at_second", "=", "idx", "break", "current_results", "=", "results_at_n", "[", "0", ":", "idx_at_second", "]", "if", "num_tokens", "==", "8", ":", "for", "r", "in", "results_at_n", ":", "logger", ".", "info", "(", "'%s --- score: %d'", "%", "(", "r", "[", "'phrase'", "]", ",", "r", "[", "'score'", "]", ")", ")", "if", "previous_results", "==", "[", "]", ":", "logger", ".", "info", "(", "'Previous results are empty. Continuing.'", ")", "continue", "## now, the main idea is to figure out if any strings from previous", "## are substrings of those from current", "## (with the scores fixed at the max for that subphrase).", "## when they stop being substrings", "## then those are completed phrases and should be returned as a result", "for", "prev_result", "in", "previous_results", ":", "is_subbed_and_same_score", "=", "False", "for", "curr_result", "in", "current_results", ":", "if", "prev_result", "[", "'phrase'", "]", "in", "curr_result", "[", "'phrase'", "]", "and", "prev_result", "[", "'score'", "]", "==", "curr_result", "[", "'score'", "]", ":", "is_subbed_and_same_score", "=", "True", "break", "if", "not", "is_subbed_and_same_score", ":", "## then it's a honest result", "prev_result", "[", "'n'", "]", "=", "num_tokens", "-", "1", "overall_results", ".", "append", "(", "prev_result", ")", "if", "len", "(", "current_results", ")", "==", "0", ":", "## we got them all", "## (we still had to collect the previous results)", "## that's why this break comes after the previous for loop", "break", "## also add results from current_results at final n", "for", "result", "in", "current_results", ":", "result", "[", "'n'", "]", "=", "num_tokens", "overall_results", ".", "append", "(", "result", ")", "## sort by score then by length", "overall_results", ".", "sort", "(", "key", "=", "itemgetter", "(", "'score'", ",", "'n'", ")", ",", "reverse", "=", "True", ")", "logger", ".", "info", "(", "'OVERALL RESULTS: %d'", "%", "len", "(", "overall_results", ")", ")", "# for idx, result in enumerate(overall_results):", "# logger.info('%d. 
%s --- score: %f , n = %d, hits=%d' %", "# (idx, result['phrase'], result['score'], result['n'], len(result['hits']))", "# )", "return", "overall_results" ]
External interface for dossier.models.soft_selectors. This at scans through `num_tokens` values between `start_num_tokens` and `max_num_tokens` and calls `find_soft_selectors_at_n` looking for results All of the params can be passed from URL parameters, in which case they can be strings and this function will type cast them appropriately.
[ "External", "interface", "for", "dossier", ".", "models", ".", "soft_selectors", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/soft_selectors.py#L35-L131
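The core of the loop above is the promotion rule: a phrase found at window size n-1 is still "growing" if it survives as a substring of an equal-scoring phrase at size n, and is finalized otherwise. A tiny self-contained illustration of just that rule, using made-up phrases and scores:

previous = [{'phrase': 'call me at', 'score': 5}, {'phrase': 'best rates in', 'score': 5}]
current = [{'phrase': 'call me at 555', 'score': 5}]   # 'best rates in' did not grow

finalized = []
for prev in previous:
    still_growing = any(
        prev['phrase'] in curr['phrase'] and prev['score'] == curr['score']
        for curr in current
    )
    if not still_growing:
        finalized.append(prev)

print(finalized)   # [{'phrase': 'best rates in', 'score': 5}]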
245,082
dossier/dossier.models
dossier/models/soft_selectors.py
make_ngram_corpus
def make_ngram_corpus(corpus_clean_visibles, num_tokens, filter_punctuation, zoning_rules=False): '''takes a list of clean_visible texts, such as from StreamItems or FCs, tokenizes all the texts, and constructs n-grams using `num_tokens` sized windows. ``corpus_clean_visibles`` -- list of unicode strings ``num_tokens`` --- the n of the n-grams ``filter_punctuation`` --- if True, punctuation is filtered ''' ## TODO: generatlize this zoning code, so that it works on many ## sites in the HT domain; consider finishing streamcorpus-zoner ## to do this. if filter_punctuation: ## word tokenizer that removes punctuation tokenize = RegexpTokenizer(r'\w+').tokenize backpage_string = 'backpage' end_string = 'Poster' else: #tokenize = word_tokenize tokenize = lambda s: string.split(s) backpage_string = 'backpage.com' end_string = 'Poster\'s' corpus = list() for clean_vis in corpus_clean_visibles: ## crudely skip pages that have "error" if re.search(u'error', clean_vis, re.I & re.UNICODE): continue ## make tokens tokens = tokenize(clean_vis) ## already a unicode string if zoning_rules: ## filter out non backpage pages if backpage_string not in tokens: continue ## string that signals the beginning of the body try: idx0 = tokens.index('Reply') except: continue ## string that signals the end of the body try: idx1 = tokens.index(end_string) except: continue tokens = tokens[idx0:idx1] ## make ngrams, attach to make strings ngrams_strings = list() for ngram_tuple in ngrams(tokens, num_tokens): # ## attempt to remove unwanted phrases ## score with many_stop_words and drop bad tuples # stop_count = sum([int(bool(tok.lower() in stop_words)) # for tok in ngram_tuple]) # if stop_count > num_tokens / 1.5: # continue ## remove ones with many repeated words if len(set(ngram_tuple)) < len(ngram_tuple) / 2: continue ## this adds ngrams for the current doc ngrams_strings.append(' '.join(ngram_tuple)) ## this adds a list of all the ngrams from the current doc ## to the corpus list corpus.append(ngrams_strings) return corpus
python
def make_ngram_corpus(corpus_clean_visibles, num_tokens, filter_punctuation, zoning_rules=False): '''takes a list of clean_visible texts, such as from StreamItems or FCs, tokenizes all the texts, and constructs n-grams using `num_tokens` sized windows. ``corpus_clean_visibles`` -- list of unicode strings ``num_tokens`` --- the n of the n-grams ``filter_punctuation`` --- if True, punctuation is filtered ''' ## TODO: generatlize this zoning code, so that it works on many ## sites in the HT domain; consider finishing streamcorpus-zoner ## to do this. if filter_punctuation: ## word tokenizer that removes punctuation tokenize = RegexpTokenizer(r'\w+').tokenize backpage_string = 'backpage' end_string = 'Poster' else: #tokenize = word_tokenize tokenize = lambda s: string.split(s) backpage_string = 'backpage.com' end_string = 'Poster\'s' corpus = list() for clean_vis in corpus_clean_visibles: ## crudely skip pages that have "error" if re.search(u'error', clean_vis, re.I & re.UNICODE): continue ## make tokens tokens = tokenize(clean_vis) ## already a unicode string if zoning_rules: ## filter out non backpage pages if backpage_string not in tokens: continue ## string that signals the beginning of the body try: idx0 = tokens.index('Reply') except: continue ## string that signals the end of the body try: idx1 = tokens.index(end_string) except: continue tokens = tokens[idx0:idx1] ## make ngrams, attach to make strings ngrams_strings = list() for ngram_tuple in ngrams(tokens, num_tokens): # ## attempt to remove unwanted phrases ## score with many_stop_words and drop bad tuples # stop_count = sum([int(bool(tok.lower() in stop_words)) # for tok in ngram_tuple]) # if stop_count > num_tokens / 1.5: # continue ## remove ones with many repeated words if len(set(ngram_tuple)) < len(ngram_tuple) / 2: continue ## this adds ngrams for the current doc ngrams_strings.append(' '.join(ngram_tuple)) ## this adds a list of all the ngrams from the current doc ## to the corpus list corpus.append(ngrams_strings) return corpus
[ "def", "make_ngram_corpus", "(", "corpus_clean_visibles", ",", "num_tokens", ",", "filter_punctuation", ",", "zoning_rules", "=", "False", ")", ":", "## TODO: generatlize this zoning code, so that it works on many", "## sites in the HT domain; consider finishing streamcorpus-zoner", "## to do this.", "if", "filter_punctuation", ":", "## word tokenizer that removes punctuation", "tokenize", "=", "RegexpTokenizer", "(", "r'\\w+'", ")", ".", "tokenize", "backpage_string", "=", "'backpage'", "end_string", "=", "'Poster'", "else", ":", "#tokenize = word_tokenize", "tokenize", "=", "lambda", "s", ":", "string", ".", "split", "(", "s", ")", "backpage_string", "=", "'backpage.com'", "end_string", "=", "'Poster\\'s'", "corpus", "=", "list", "(", ")", "for", "clean_vis", "in", "corpus_clean_visibles", ":", "## crudely skip pages that have \"error\"", "if", "re", ".", "search", "(", "u'error'", ",", "clean_vis", ",", "re", ".", "I", "&", "re", ".", "UNICODE", ")", ":", "continue", "## make tokens", "tokens", "=", "tokenize", "(", "clean_vis", ")", "## already a unicode string", "if", "zoning_rules", ":", "## filter out non backpage pages", "if", "backpage_string", "not", "in", "tokens", ":", "continue", "## string that signals the beginning of the body", "try", ":", "idx0", "=", "tokens", ".", "index", "(", "'Reply'", ")", "except", ":", "continue", "## string that signals the end of the body", "try", ":", "idx1", "=", "tokens", ".", "index", "(", "end_string", ")", "except", ":", "continue", "tokens", "=", "tokens", "[", "idx0", ":", "idx1", "]", "## make ngrams, attach to make strings", "ngrams_strings", "=", "list", "(", ")", "for", "ngram_tuple", "in", "ngrams", "(", "tokens", ",", "num_tokens", ")", ":", "# ## attempt to remove unwanted phrases", "## score with many_stop_words and drop bad tuples", "# stop_count = sum([int(bool(tok.lower() in stop_words))", "# for tok in ngram_tuple])", "# if stop_count > num_tokens / 1.5:", "# continue", "## remove ones with many repeated words", "if", "len", "(", "set", "(", "ngram_tuple", ")", ")", "<", "len", "(", "ngram_tuple", ")", "/", "2", ":", "continue", "## this adds ngrams for the current doc", "ngrams_strings", ".", "append", "(", "' '", ".", "join", "(", "ngram_tuple", ")", ")", "## this adds a list of all the ngrams from the current doc", "## to the corpus list", "corpus", ".", "append", "(", "ngrams_strings", ")", "return", "corpus" ]
takes a list of clean_visible texts, such as from StreamItems or FCs, tokenizes all the texts, and constructs n-grams using `num_tokens` sized windows. ``corpus_clean_visibles`` -- list of unicode strings ``num_tokens`` --- the n of the n-grams ``filter_punctuation`` --- if True, punctuation is filtered
[ "takes", "a", "list", "of", "clean_visible", "texts", "such", "as", "from", "StreamItems", "or", "FCs", "tokenizes", "all", "the", "texts", "and", "constructs", "n", "-", "grams", "using", "num_tokens", "sized", "windows", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/soft_selectors.py#L184-L258
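Two details in the function above are worth flagging. First, re.search(u'error', clean_vis, re.I & re.UNICODE) combines the flags with bitwise AND; since re.I and re.UNICODE occupy distinct bits, re.I & re.UNICODE evaluates to 0 and the search silently runs with no flags at all (flags are meant to be OR'd, as in re.I | re.UNICODE). Second, the n-gram windowing itself is easy to reproduce without NLTK; a minimal sketch:

def sliding_ngrams(tokens, n):
    # yield every n-token window, the same shape nltk.util.ngrams produces
    for i in range(len(tokens) - n + 1):
        yield tuple(tokens[i:i + n])

tokens = 'the quick brown fox'.split()
print([' '.join(g) for g in sliding_ngrams(tokens, 3)])
# ['the quick brown', 'quick brown fox']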
245,083
dossier/dossier.models
dossier/models/soft_selectors.py
ids_and_clean_visible_from_streamcorpus_chunk_path
def ids_and_clean_visible_from_streamcorpus_chunk_path(corpus_path): '''converts a streamcorpus.Chunk file into the structure that is passed by the search engine to find_soft_selectors ''' ch = clean_html(clean_html.default_config) cv = clean_visible(clean_visible.default_config) ids_and_clean_visible = [] for si in streamcorpus.Chunk(path=corpus_path): if not si.body.clean_visible: ## attempt to make clean_visible if not si.body.raw: logger.critical('no raw content, so skipping: %r', si.abs_url) continue abs_url = si.abs_url si = ch(si, {}) if not si: logger.critical( 'failed to make clean_html, so skipping: %r', abs_url) continue si = cv(si, {}) if not si or not si.body.clean_visible: logger.critical( 'failed to make clean_visible, so skipping: %r', abs_url) continue rec = (si.stream_id, si.body.clean_visible.decode('utf8'), {}) ids_and_clean_visible.append(rec) return ids_and_clean_visible
python
def ids_and_clean_visible_from_streamcorpus_chunk_path(corpus_path): '''converts a streamcorpus.Chunk file into the structure that is passed by the search engine to find_soft_selectors ''' ch = clean_html(clean_html.default_config) cv = clean_visible(clean_visible.default_config) ids_and_clean_visible = [] for si in streamcorpus.Chunk(path=corpus_path): if not si.body.clean_visible: ## attempt to make clean_visible if not si.body.raw: logger.critical('no raw content, so skipping: %r', si.abs_url) continue abs_url = si.abs_url si = ch(si, {}) if not si: logger.critical( 'failed to make clean_html, so skipping: %r', abs_url) continue si = cv(si, {}) if not si or not si.body.clean_visible: logger.critical( 'failed to make clean_visible, so skipping: %r', abs_url) continue rec = (si.stream_id, si.body.clean_visible.decode('utf8'), {}) ids_and_clean_visible.append(rec) return ids_and_clean_visible
[ "def", "ids_and_clean_visible_from_streamcorpus_chunk_path", "(", "corpus_path", ")", ":", "ch", "=", "clean_html", "(", "clean_html", ".", "default_config", ")", "cv", "=", "clean_visible", "(", "clean_visible", ".", "default_config", ")", "ids_and_clean_visible", "=", "[", "]", "for", "si", "in", "streamcorpus", ".", "Chunk", "(", "path", "=", "corpus_path", ")", ":", "if", "not", "si", ".", "body", ".", "clean_visible", ":", "## attempt to make clean_visible", "if", "not", "si", ".", "body", ".", "raw", ":", "logger", ".", "critical", "(", "'no raw content, so skipping: %r'", ",", "si", ".", "abs_url", ")", "continue", "abs_url", "=", "si", ".", "abs_url", "si", "=", "ch", "(", "si", ",", "{", "}", ")", "if", "not", "si", ":", "logger", ".", "critical", "(", "'failed to make clean_html, so skipping: %r'", ",", "abs_url", ")", "continue", "si", "=", "cv", "(", "si", ",", "{", "}", ")", "if", "not", "si", "or", "not", "si", ".", "body", ".", "clean_visible", ":", "logger", ".", "critical", "(", "'failed to make clean_visible, so skipping: %r'", ",", "abs_url", ")", "continue", "rec", "=", "(", "si", ".", "stream_id", ",", "si", ".", "body", ".", "clean_visible", ".", "decode", "(", "'utf8'", ")", ",", "{", "}", ")", "ids_and_clean_visible", ".", "append", "(", "rec", ")", "return", "ids_and_clean_visible" ]
converts a streamcorpus.Chunk file into the structure that is passed by the search engine to find_soft_selectors
[ "converts", "a", "streamcorpus", ".", "Chunk", "file", "into", "the", "structure", "that", "is", "passed", "by", "the", "search", "engine", "to", "find_soft_selectors" ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/soft_selectors.py#L261-L288
245,084
sassoo/goldman
goldman/models/login.py
pre_create
def pre_create(sender, model): """ Callback before creating a new login Without a password during create we are forced to set the password to something random & complex. """ if isinstance(model, Model) and not model.password: model.password = random_str()
python
def pre_create(sender, model): """ Callback before creating a new login Without a password during create we are forced to set the password to something random & complex. """ if isinstance(model, Model) and not model.password: model.password = random_str()
[ "def", "pre_create", "(", "sender", ",", "model", ")", ":", "if", "isinstance", "(", "model", ",", "Model", ")", "and", "not", "model", ".", "password", ":", "model", ".", "password", "=", "random_str", "(", ")" ]
Callback before creating a new login Without a password during create we are forced to set the password to something random & complex.
[ "Callback", "before", "creating", "a", "new", "login" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/login.py#L156-L164
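random_str is imported from elsewhere in the package and its body is not shown in this record. A hedged stand-in with the same intent (a random, complex throwaway password), built on the standard-library secrets module; the real goldman helper may differ:

import secrets
import string

def random_str(length=32):
    # cryptographically secure random string of letters and digits
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))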
245,085
sassoo/goldman
goldman/models/login.py
pre_save
def pre_save(sender, model): """ Hash the password if being changed """ if isinstance(model, Model) and 'password' in model.dirty_fields: model.salt, model.password = gen_salt_and_hash(model.password)
python
def pre_save(sender, model): """ Hash the password if being changed """ if isinstance(model, Model) and 'password' in model.dirty_fields: model.salt, model.password = gen_salt_and_hash(model.password)
[ "def", "pre_save", "(", "sender", ",", "model", ")", ":", "if", "isinstance", "(", "model", ",", "Model", ")", "and", "'password'", "in", "model", ".", "dirty_fields", ":", "model", ".", "salt", ",", "model", ".", "password", "=", "gen_salt_and_hash", "(", "model", ".", "password", ")" ]
Hash the password if being changed
[ "Hash", "the", "password", "if", "being", "changed" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/login.py#L167-L171
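gen_salt_and_hash (and its counterpart cmp_val_salt_hash used in auth_creds below) are also package helpers whose bodies are not shown here. A plausible sketch of the pair under common conventions, using PBKDF2 and a constant-time comparison from the standard library; the real goldman implementation may differ:

import hashlib
import hmac
import os

def gen_salt_and_hash(password):
    # derive a salted hash; returns (salt, hash) as hex strings
    salt = os.urandom(16).hex()
    digest = hashlib.pbkdf2_hmac('sha256', password.encode(), salt.encode(), 100000)
    return salt, digest.hex()

def cmp_val_salt_hash(password, salt, expected_hash):
    # recompute and compare in constant time to avoid timing side channels
    digest = hashlib.pbkdf2_hmac('sha256', password.encode(), salt.encode(), 100000)
    return hmac.compare_digest(digest.hex(), expected_hash)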
245,086
sassoo/goldman
goldman/models/login.py
Model.auth_creds
def auth_creds(cls, username, password): """ Validate a username & password A token is returned if auth is successful & can be used to authorize future requests or ignored entirely if the authorization mechanizm does not need it. :return: string token """ store = goldman.sess.store login = store.find(cls.RTYPE, 'username', username) if not login: msg = 'No login found by that username. Spelling error?' raise AuthRejected(**{'detail': msg}) elif login.locked: msg = 'The login account is currently locked out.' raise AuthRejected(**{'detail': msg}) elif not cmp_val_salt_hash(password, login.salt, login.password): msg = 'The password provided is incorrect. Spelling error?' raise AuthRejected(**{'detail': msg}) else: if not login.token: login.token = random_str() login.post_authenticate() return login.token
python
def auth_creds(cls, username, password): """ Validate a username & password A token is returned if auth is successful & can be used to authorize future requests or ignored entirely if the authorization mechanizm does not need it. :return: string token """ store = goldman.sess.store login = store.find(cls.RTYPE, 'username', username) if not login: msg = 'No login found by that username. Spelling error?' raise AuthRejected(**{'detail': msg}) elif login.locked: msg = 'The login account is currently locked out.' raise AuthRejected(**{'detail': msg}) elif not cmp_val_salt_hash(password, login.salt, login.password): msg = 'The password provided is incorrect. Spelling error?' raise AuthRejected(**{'detail': msg}) else: if not login.token: login.token = random_str() login.post_authenticate() return login.token
[ "def", "auth_creds", "(", "cls", ",", "username", ",", "password", ")", ":", "store", "=", "goldman", ".", "sess", ".", "store", "login", "=", "store", ".", "find", "(", "cls", ".", "RTYPE", ",", "'username'", ",", "username", ")", "if", "not", "login", ":", "msg", "=", "'No login found by that username. Spelling error?'", "raise", "AuthRejected", "(", "*", "*", "{", "'detail'", ":", "msg", "}", ")", "elif", "login", ".", "locked", ":", "msg", "=", "'The login account is currently locked out.'", "raise", "AuthRejected", "(", "*", "*", "{", "'detail'", ":", "msg", "}", ")", "elif", "not", "cmp_val_salt_hash", "(", "password", ",", "login", ".", "salt", ",", "login", ".", "password", ")", ":", "msg", "=", "'The password provided is incorrect. Spelling error?'", "raise", "AuthRejected", "(", "*", "*", "{", "'detail'", ":", "msg", "}", ")", "else", ":", "if", "not", "login", ".", "token", ":", "login", ".", "token", "=", "random_str", "(", ")", "login", ".", "post_authenticate", "(", ")", "return", "login", ".", "token" ]
Validate a username & password A token is returned if auth is successful & can be used to authorize future requests or ignored entirely if the authorization mechanizm does not need it. :return: string token
[ "Validate", "a", "username", "&", "password" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/login.py#L65-L91
245,087
sassoo/goldman
goldman/models/login.py
Model.auth_token
def auth_token(cls, token): """ Callback method for OAuth 2.0 bearer token middleware """ store = goldman.sess.store login = store.find(cls.RTYPE, 'token', token) if not login: msg = 'No login found with that token. It may have been revoked.' raise AuthRejected(**{'detail': msg}) elif login.locked: msg = 'The login account is currently locked out.' raise AuthRejected(**{'detail': msg}) else: login.post_authenticate()
python
def auth_token(cls, token): """ Callback method for OAuth 2.0 bearer token middleware """ store = goldman.sess.store login = store.find(cls.RTYPE, 'token', token) if not login: msg = 'No login found with that token. It may have been revoked.' raise AuthRejected(**{'detail': msg}) elif login.locked: msg = 'The login account is currently locked out.' raise AuthRejected(**{'detail': msg}) else: login.post_authenticate()
[ "def", "auth_token", "(", "cls", ",", "token", ")", ":", "store", "=", "goldman", ".", "sess", ".", "store", "login", "=", "store", ".", "find", "(", "cls", ".", "RTYPE", ",", "'token'", ",", "token", ")", "if", "not", "login", ":", "msg", "=", "'No login found with that token. It may have been revoked.'", "raise", "AuthRejected", "(", "*", "*", "{", "'detail'", ":", "msg", "}", ")", "elif", "login", ".", "locked", ":", "msg", "=", "'The login account is currently locked out.'", "raise", "AuthRejected", "(", "*", "*", "{", "'detail'", ":", "msg", "}", ")", "else", ":", "login", ".", "post_authenticate", "(", ")" ]
Callback method for OAuth 2.0 bearer token middleware
[ "Callback", "method", "for", "OAuth", "2", ".", "0", "bearer", "token", "middleware" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/login.py#L94-L107
245,088
sassoo/goldman
goldman/models/login.py
Model.post_authenticate
def post_authenticate(self): """ Update the login_date timestamp Initialize the thread local sess.login property with the authenticated login model. The login_date update will be debounced so writes don't occur on every hit of the the API. If the login_date was modified within 15 minutes then don't update it. """ goldman.sess.login = self now = dt.now() if not self.login_date: self.login_date = now else: sec_since_updated = (now - self.login_date).seconds min_since_updated = sec_since_updated / 60 if min_since_updated > 15: self.login_date = now if self.dirty: store = goldman.sess.store store.update(self)
python
def post_authenticate(self): """ Update the login_date timestamp Initialize the thread local sess.login property with the authenticated login model. The login_date update will be debounced so writes don't occur on every hit of the the API. If the login_date was modified within 15 minutes then don't update it. """ goldman.sess.login = self now = dt.now() if not self.login_date: self.login_date = now else: sec_since_updated = (now - self.login_date).seconds min_since_updated = sec_since_updated / 60 if min_since_updated > 15: self.login_date = now if self.dirty: store = goldman.sess.store store.update(self)
[ "def", "post_authenticate", "(", "self", ")", ":", "goldman", ".", "sess", ".", "login", "=", "self", "now", "=", "dt", ".", "now", "(", ")", "if", "not", "self", ".", "login_date", ":", "self", ".", "login_date", "=", "now", "else", ":", "sec_since_updated", "=", "(", "now", "-", "self", ".", "login_date", ")", ".", "seconds", "min_since_updated", "=", "sec_since_updated", "/", "60", "if", "min_since_updated", ">", "15", ":", "self", ".", "login_date", "=", "now", "if", "self", ".", "dirty", ":", "store", "=", "goldman", ".", "sess", ".", "store", "store", ".", "update", "(", "self", ")" ]
Update the login_date timestamp Initialize the thread local sess.login property with the authenticated login model. The login_date update will be debounced so writes don't occur on every hit of the the API. If the login_date was modified within 15 minutes then don't update it.
[ "Update", "the", "login_date", "timestamp" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/login.py#L109-L134
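One caveat in the debounce above: (now - self.login_date).seconds reads only the seconds component of the timedelta, not its total length, so a gap of, say, one day and five minutes yields seconds == 300 and the timestamp would not be refreshed. timedelta.total_seconds() measures the true elapsed time:

from datetime import datetime, timedelta

login_date = datetime.now() - timedelta(days=1, minutes=5)
delta = datetime.now() - login_date
print(delta.seconds // 60)                 # ~5    (seconds component only)
print(int(delta.total_seconds()) // 60)    # ~1445 (true elapsed minutes)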
245,089
sassoo/goldman
goldman/models/login.py
Model.validate_username
def validate_username(self, data, value): """ Ensure the username is unique If the login is being created then simply check if the username is in the store & fail. Otherwise if the login is being updated check if the existing rid on a username match is the same as the login being updated otherwise fail. """ store = goldman.sess.store existing = store.find(data['rtype'], 'username', value) if existing: if not data['rid'] or data['rid'] != existing.rid: raise ValidationError('username is already taken')
python
def validate_username(self, data, value): """ Ensure the username is unique If the login is being created then simply check if the username is in the store & fail. Otherwise if the login is being updated check if the existing rid on a username match is the same as the login being updated otherwise fail. """ store = goldman.sess.store existing = store.find(data['rtype'], 'username', value) if existing: if not data['rid'] or data['rid'] != existing.rid: raise ValidationError('username is already taken')
[ "def", "validate_username", "(", "self", ",", "data", ",", "value", ")", ":", "store", "=", "goldman", ".", "sess", ".", "store", "existing", "=", "store", ".", "find", "(", "data", "[", "'rtype'", "]", ",", "'username'", ",", "value", ")", "if", "existing", ":", "if", "not", "data", "[", "'rid'", "]", "or", "data", "[", "'rid'", "]", "!=", "existing", ".", "rid", ":", "raise", "ValidationError", "(", "'username is already taken'", ")" ]
Ensure the username is unique If the login is being created then simply check if the username is in the store & fail. Otherwise if the login is being updated check if the existing rid on a username match is the same as the login being updated otherwise fail.
[ "Ensure", "the", "username", "is", "unique" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/models/login.py#L136-L152
245,090
pip-services3-python/pip-services3-components-python
pip_services3_components/log/LogLevelConverter.py
LogLevelConverter.to_log_level
def to_log_level(value): """ Converts numbers and strings to standard log level values. :param value: a value to be converted :return: converted log level """ if value == None: return LogLevel.Info value = str(value).upper() if ("0" == value) or ("NOTHING" == value) or ("NONE" == value): return LogLevel.Nothing elif ("1" == value) or ("FATAL" == value): return LogLevel.Fatal elif ("2" == value) or ("ERROR" == value): return LogLevel.Error elif ("3" == value) or ("WARN" == value) or ("WARNING" == value): return LogLevel.Warn elif ("4" == value) or ("INFO" == value): return LogLevel.Info elif ("5" == value) or ("DEBUG" == value): return LogLevel.Debug elif ("6" == value) or ("TRACE" == value): return LogLevel.Trace else: return LogLevel.Info
python
def to_log_level(value): """ Converts numbers and strings to standard log level values. :param value: a value to be converted :return: converted log level """ if value == None: return LogLevel.Info value = str(value).upper() if ("0" == value) or ("NOTHING" == value) or ("NONE" == value): return LogLevel.Nothing elif ("1" == value) or ("FATAL" == value): return LogLevel.Fatal elif ("2" == value) or ("ERROR" == value): return LogLevel.Error elif ("3" == value) or ("WARN" == value) or ("WARNING" == value): return LogLevel.Warn elif ("4" == value) or ("INFO" == value): return LogLevel.Info elif ("5" == value) or ("DEBUG" == value): return LogLevel.Debug elif ("6" == value) or ("TRACE" == value): return LogLevel.Trace else: return LogLevel.Info
[ "def", "to_log_level", "(", "value", ")", ":", "if", "value", "==", "None", ":", "return", "LogLevel", ".", "Info", "value", "=", "str", "(", "value", ")", ".", "upper", "(", ")", "if", "(", "\"0\"", "==", "value", ")", "or", "(", "\"NOTHING\"", "==", "value", ")", "or", "(", "\"NONE\"", "==", "value", ")", ":", "return", "LogLevel", ".", "Nothing", "elif", "(", "\"1\"", "==", "value", ")", "or", "(", "\"FATAL\"", "==", "value", ")", ":", "return", "LogLevel", ".", "Fatal", "elif", "(", "\"2\"", "==", "value", ")", "or", "(", "\"ERROR\"", "==", "value", ")", ":", "return", "LogLevel", ".", "Error", "elif", "(", "\"3\"", "==", "value", ")", "or", "(", "\"WARN\"", "==", "value", ")", "or", "(", "\"WARNING\"", "==", "value", ")", ":", "return", "LogLevel", ".", "Warn", "elif", "(", "\"4\"", "==", "value", ")", "or", "(", "\"INFO\"", "==", "value", ")", ":", "return", "LogLevel", ".", "Info", "elif", "(", "\"5\"", "==", "value", ")", "or", "(", "\"DEBUG\"", "==", "value", ")", ":", "return", "LogLevel", ".", "Debug", "elif", "(", "\"6\"", "==", "value", ")", "or", "(", "\"TRACE\"", "==", "value", ")", ":", "return", "LogLevel", ".", "Trace", "else", ":", "return", "LogLevel", ".", "Info" ]
Converts numbers and strings to standard log level values. :param value: a value to be converted :return: converted log level
[ "Converts", "numbers", "and", "strings", "to", "standard", "log", "level", "values", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/LogLevelConverter.py#L19-L46
245,091
pip-services3-python/pip-services3-components-python
pip_services3_components/log/LogLevelConverter.py
LogLevelConverter.to_string
def to_string(level): """ Converts log level to a string. :param level: a log level to convert :return: log level name string. """ if level == LogLevel.Fatal: return "FATAL" if level == LogLevel.Error: return "ERROR" if level == LogLevel.Warn: return "WARN" if level == LogLevel.Info: return "INFO" if level == LogLevel.Debug: return "DEBUG" if level == LogLevel.Trace: return "TRACE" return "UNDEF"
python
def to_string(level): """ Converts log level to a string. :param level: a log level to convert :return: log level name string. """ if level == LogLevel.Fatal: return "FATAL" if level == LogLevel.Error: return "ERROR" if level == LogLevel.Warn: return "WARN" if level == LogLevel.Info: return "INFO" if level == LogLevel.Debug: return "DEBUG" if level == LogLevel.Trace: return "TRACE" return "UNDEF"
[ "def", "to_string", "(", "level", ")", ":", "if", "level", "==", "LogLevel", ".", "Fatal", ":", "return", "\"FATAL\"", "if", "level", "==", "LogLevel", ".", "Error", ":", "return", "\"ERROR\"", "if", "level", "==", "LogLevel", ".", "Warn", ":", "return", "\"WARN\"", "if", "level", "==", "LogLevel", ".", "Info", ":", "return", "\"INFO\"", "if", "level", "==", "LogLevel", ".", "Debug", ":", "return", "\"DEBUG\"", "if", "level", "==", "LogLevel", ".", "Trace", ":", "return", "\"TRACE\"", "return", "\"UNDEF\"" ]
Converts log level to a string. :param level: a log level to convert :return: log level name string.
[ "Converts", "log", "level", "to", "a", "string", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/LogLevelConverter.py#L49-L69
245,092
stephanepechard/projy
fabfile.py
commit
def commit(message=COMMON_COMMIT_MESSAGE, capture=True): """ git commit with common commit message when omit. """ env.warn_only = True local(u'git commit -am"{}"'.format(message))
python
def commit(message=COMMON_COMMIT_MESSAGE, capture=True): """ git commit with common commit message when omit. """ env.warn_only = True local(u'git commit -am"{}"'.format(message))
[ "def", "commit", "(", "message", "=", "COMMON_COMMIT_MESSAGE", ",", "capture", "=", "True", ")", ":", "env", ".", "warn_only", "=", "True", "local", "(", "u'git commit -am\"{}\"'", ".", "format", "(", "message", ")", ")" ]
git commit with common commit message when omit.
[ "git", "commit", "with", "common", "commit", "message", "when", "omit", "." ]
3146b0e3c207b977e1b51fcb33138746dae83c23
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/fabfile.py#L15-L18
245,093
nefarioustim/parker
parker/store.py
get_filestore_instance
def get_filestore_instance(img_dir=None, data_dir=None): """Return an instance of FileStore.""" global _filestore_instances key = "%s:%s" % (img_dir, data_dir) try: instance = _filestore_instances[key] except KeyError: instance = FileStore( img_dir=img_dir, data_dir=data_dir ) _filestore_instances[key] = instance return instance
python
def get_filestore_instance(img_dir=None, data_dir=None): """Return an instance of FileStore.""" global _filestore_instances key = "%s:%s" % (img_dir, data_dir) try: instance = _filestore_instances[key] except KeyError: instance = FileStore( img_dir=img_dir, data_dir=data_dir ) _filestore_instances[key] = instance return instance
[ "def", "get_filestore_instance", "(", "img_dir", "=", "None", ",", "data_dir", "=", "None", ")", ":", "global", "_filestore_instances", "key", "=", "\"%s:%s\"", "%", "(", "img_dir", ",", "data_dir", ")", "try", ":", "instance", "=", "_filestore_instances", "[", "key", "]", "except", "KeyError", ":", "instance", "=", "FileStore", "(", "img_dir", "=", "img_dir", ",", "data_dir", "=", "data_dir", ")", "_filestore_instances", "[", "key", "]", "=", "instance", "return", "instance" ]
Return an instance of FileStore.
[ "Return", "an", "instance", "of", "FileStore", "." ]
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/store.py#L16-L30
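get_filestore_instance (and get_s3store_instance below) both implement the same keyed-singleton pattern: build a cache key from the constructor arguments, return the cached instance on a hit, construct and cache on a miss. One subtlety of the string key "%s:%s" is that it can collide when a directory name itself contains ":"; keying on the argument tuple avoids that, which is exactly what functools.lru_cache does. A hedged sketch with a stand-in class:

from functools import lru_cache

class FileStoreStub:
    # stand-in for parker's FileStore, taking the same keyword arguments
    def __init__(self, img_dir=None, data_dir=None):
        self.img_dir, self.data_dir = img_dir, data_dir

@lru_cache(maxsize=None)
def get_filestore(img_dir=None, data_dir=None):
    return FileStoreStub(img_dir=img_dir, data_dir=data_dir)

assert get_filestore('/img', '/data') is get_filestore('/img', '/data')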
245,094
nefarioustim/parker
parker/store.py
get_s3store_instance
def get_s3store_instance(bucket): """Return an instance of S3Store.""" global _s3store_instances key = "%s" % bucket try: instance = _s3store_instances[key] except KeyError: instance = S3Store( bucket=bucket ) _s3store_instances[key] = instance return instance
python
def get_s3store_instance(bucket): """Return an instance of S3Store.""" global _s3store_instances key = "%s" % bucket try: instance = _s3store_instances[key] except KeyError: instance = S3Store( bucket=bucket ) _s3store_instances[key] = instance return instance
[ "def", "get_s3store_instance", "(", "bucket", ")", ":", "global", "_s3store_instances", "key", "=", "\"%s\"", "%", "bucket", "try", ":", "instance", "=", "_s3store_instances", "[", "key", "]", "except", "KeyError", ":", "instance", "=", "S3Store", "(", "bucket", "=", "bucket", ")", "_s3store_instances", "[", "key", "]", "=", "instance", "return", "instance" ]
Return an instance of S3Store.
[ "Return", "an", "instance", "of", "S3Store", "." ]
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/store.py#L33-L47
245,095
collectiveacuity/labPack
labpack/parsing/comparison.py
_compare_dict
def _compare_dict(new_dict, old_dict, change_list=None, root=None): ''' a method for recursively listing changes made to a dictionary :param new_dict: dictionary with new key-value pairs :param old_dict: dictionary with old key-value pairs :param change_list: list of differences between old and new :patam root: string with record of path to the root of the main object :return: list of differences between old and new ''' from copy import deepcopy new_keys = set(new_dict.keys()) old_keys = set(old_dict.keys()) missing_keys = old_keys - new_keys extra_keys = new_keys - old_keys same_keys = new_keys.intersection(old_keys) for key in missing_keys: new_path = deepcopy(root) new_path.append(key) change_list.append({'action': 'DELETE', 'value': None, 'path': new_path}) for key in extra_keys: for k, v in new_dict.items(): if key == k: new_path = deepcopy(root) new_path.append(key) change_list.append({'action': 'ADD', 'value': v, 'path': new_path}) for key in same_keys: new_path = deepcopy(root) new_path.append(key) if new_dict[key].__class__ != old_dict[key].__class__: change_list.append({'action': 'UPDATE', 'value': new_dict[key], 'path': new_path}) elif isinstance(new_dict[key], dict): _compare_dict(new_dict[key], old_dict[key], change_list, new_path) elif isinstance(new_dict[key], list): _compare_list(new_dict[key], old_dict[key], change_list, new_path) elif isinstance(new_dict[key], set): _compare_set(new_dict[key], old_dict[key], change_list, new_path) elif new_dict[key] != old_dict[key]: change_list.append({'action': 'UPDATE', 'value': new_dict[key], 'path': new_path}) return change_list
python
def _compare_dict(new_dict, old_dict, change_list=None, root=None): ''' a method for recursively listing changes made to a dictionary :param new_dict: dictionary with new key-value pairs :param old_dict: dictionary with old key-value pairs :param change_list: list of differences between old and new :param root: list with record of path to the root of the main object :return: list of differences between old and new ''' from copy import deepcopy new_keys = set(new_dict.keys()) old_keys = set(old_dict.keys()) missing_keys = old_keys - new_keys extra_keys = new_keys - old_keys same_keys = new_keys.intersection(old_keys) for key in missing_keys: new_path = deepcopy(root) new_path.append(key) change_list.append({'action': 'DELETE', 'value': None, 'path': new_path}) for key in extra_keys: for k, v in new_dict.items(): if key == k: new_path = deepcopy(root) new_path.append(key) change_list.append({'action': 'ADD', 'value': v, 'path': new_path}) for key in same_keys: new_path = deepcopy(root) new_path.append(key) if new_dict[key].__class__ != old_dict[key].__class__: change_list.append({'action': 'UPDATE', 'value': new_dict[key], 'path': new_path}) elif isinstance(new_dict[key], dict): _compare_dict(new_dict[key], old_dict[key], change_list, new_path) elif isinstance(new_dict[key], list): _compare_list(new_dict[key], old_dict[key], change_list, new_path) elif isinstance(new_dict[key], set): _compare_set(new_dict[key], old_dict[key], change_list, new_path) elif new_dict[key] != old_dict[key]: change_list.append({'action': 'UPDATE', 'value': new_dict[key], 'path': new_path}) return change_list
[ "def", "_compare_dict", "(", "new_dict", ",", "old_dict", ",", "change_list", "=", "None", ",", "root", "=", "None", ")", ":", "from", "copy", "import", "deepcopy", "new_keys", "=", "set", "(", "new_dict", ".", "keys", "(", ")", ")", "old_keys", "=", "set", "(", "old_dict", ".", "keys", "(", ")", ")", "missing_keys", "=", "old_keys", "-", "new_keys", "extra_keys", "=", "new_keys", "-", "old_keys", "same_keys", "=", "new_keys", ".", "intersection", "(", "old_keys", ")", "for", "key", "in", "missing_keys", ":", "new_path", "=", "deepcopy", "(", "root", ")", "new_path", ".", "append", "(", "key", ")", "change_list", ".", "append", "(", "{", "'action'", ":", "'DELETE'", ",", "'value'", ":", "None", ",", "'path'", ":", "new_path", "}", ")", "for", "key", "in", "extra_keys", ":", "for", "k", ",", "v", "in", "new_dict", ".", "items", "(", ")", ":", "if", "key", "==", "k", ":", "new_path", "=", "deepcopy", "(", "root", ")", "new_path", ".", "append", "(", "key", ")", "change_list", ".", "append", "(", "{", "'action'", ":", "'ADD'", ",", "'value'", ":", "v", ",", "'path'", ":", "new_path", "}", ")", "for", "key", "in", "same_keys", ":", "new_path", "=", "deepcopy", "(", "root", ")", "new_path", ".", "append", "(", "key", ")", "if", "new_dict", "[", "key", "]", ".", "__class__", "!=", "old_dict", "[", "key", "]", ".", "__class__", ":", "change_list", ".", "append", "(", "{", "'action'", ":", "'UPDATE'", ",", "'value'", ":", "new_dict", "[", "key", "]", ",", "'path'", ":", "new_path", "}", ")", "elif", "isinstance", "(", "new_dict", "[", "key", "]", ",", "dict", ")", ":", "_compare_dict", "(", "new_dict", "[", "key", "]", ",", "old_dict", "[", "key", "]", ",", "change_list", ",", "new_path", ")", "elif", "isinstance", "(", "new_dict", "[", "key", "]", ",", "list", ")", ":", "_compare_list", "(", "new_dict", "[", "key", "]", ",", "old_dict", "[", "key", "]", ",", "change_list", ",", "new_path", ")", "elif", "isinstance", "(", "new_dict", "[", "key", "]", ",", "set", ")", ":", "_compare_set", "(", "new_dict", "[", "key", "]", ",", "old_dict", "[", "key", "]", ",", "change_list", ",", "new_path", ")", "elif", "new_dict", "[", "key", "]", "!=", "old_dict", "[", "key", "]", ":", "change_list", ".", "append", "(", "{", "'action'", ":", "'UPDATE'", ",", "'value'", ":", "new_dict", "[", "key", "]", ",", "'path'", ":", "new_path", "}", ")", "return", "change_list" ]
a method for recursively listing changes made to a dictionary :param new_dict: dictionary with new key-value pairs :param old_dict: dictionary with old key-value pairs :param change_list: list of differences between old and new :param root: list with record of path to the root of the main object :return: list of differences between old and new
[ "a", "method", "for", "recursively", "listing", "changes", "made", "to", "a", "dictionary" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/parsing/comparison.py#L36-L78
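A usage sketch for the recursive dict diff above. Although change_list and root default to None, the body appends to both, so callers should pass empty lists explicitly:
changes = _compare_dict({'a': 1, 'b': 2}, {'a': 1, 'c': 3}, change_list=[], root=[])
# 'c' was dropped, 'b' was added, 'a' is unchanged:
# [{'action': 'DELETE', 'value': None, 'path': ['c']},
#  {'action': 'ADD', 'value': 2, 'path': ['b']}]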
245,096
collectiveacuity/labPack
labpack/parsing/comparison.py
_compare_list
def _compare_list(new_list, old_list, change_list=None, root=None): ''' a method for recursively listing changes made to a list :param new_list: list with new values :param old_list: list with old values :param change_list: list of differences between old and new :param root: list with record of path to the root of the main object :return: list of differences between old and new ''' from copy import deepcopy if len(old_list) > len(new_list): same_len = len(new_list) for i in reversed(range(len(new_list), len(old_list))): new_path = deepcopy(root) new_path.append(i) change_list.append({'action': 'REMOVE', 'value': None, 'path': new_path}) elif len(new_list) > len(old_list): same_len = len(old_list) append_list = [] path = deepcopy(root) for i in range(len(old_list), len(new_list)): append_list.append(new_list[i]) change_list.append({'action': 'APPEND', 'value': append_list, 'path': path}) else: same_len = len(new_list) for i in range(0, same_len): new_path = deepcopy(root) new_path.append(i) if new_list[i].__class__ != old_list[i].__class__: change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path}) elif isinstance(new_list[i], dict): _compare_dict(new_list[i], old_list[i], change_list, new_path) elif isinstance(new_list[i], list): _compare_list(new_list[i], old_list[i], change_list, new_path) elif isinstance(new_list[i], set): _compare_set(new_list[i], old_list[i], change_list, new_path) elif new_list[i] != old_list[i]: change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path}) return change_list
python
def _compare_list(new_list, old_list, change_list=None, root=None): ''' a method for recursively listing changes made to a list :param new_list: list with new values :param old_list: list with old values :param change_list: list of differences between old and new :param root: list with record of path to the root of the main object :return: list of differences between old and new ''' from copy import deepcopy if len(old_list) > len(new_list): same_len = len(new_list) for i in reversed(range(len(new_list), len(old_list))): new_path = deepcopy(root) new_path.append(i) change_list.append({'action': 'REMOVE', 'value': None, 'path': new_path}) elif len(new_list) > len(old_list): same_len = len(old_list) append_list = [] path = deepcopy(root) for i in range(len(old_list), len(new_list)): append_list.append(new_list[i]) change_list.append({'action': 'APPEND', 'value': append_list, 'path': path}) else: same_len = len(new_list) for i in range(0, same_len): new_path = deepcopy(root) new_path.append(i) if new_list[i].__class__ != old_list[i].__class__: change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path}) elif isinstance(new_list[i], dict): _compare_dict(new_list[i], old_list[i], change_list, new_path) elif isinstance(new_list[i], list): _compare_list(new_list[i], old_list[i], change_list, new_path) elif isinstance(new_list[i], set): _compare_set(new_list[i], old_list[i], change_list, new_path) elif new_list[i] != old_list[i]: change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path}) return change_list
[ "def", "_compare_list", "(", "new_list", ",", "old_list", ",", "change_list", "=", "None", ",", "root", "=", "None", ")", ":", "from", "copy", "import", "deepcopy", "if", "len", "(", "old_list", ")", ">", "len", "(", "new_list", ")", ":", "same_len", "=", "len", "(", "new_list", ")", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "new_list", ")", ",", "len", "(", "old_list", ")", ")", ")", ":", "new_path", "=", "deepcopy", "(", "root", ")", "new_path", ".", "append", "(", "i", ")", "change_list", ".", "append", "(", "{", "'action'", ":", "'REMOVE'", ",", "'value'", ":", "None", ",", "'path'", ":", "new_path", "}", ")", "elif", "len", "(", "new_list", ")", ">", "len", "(", "old_list", ")", ":", "same_len", "=", "len", "(", "old_list", ")", "append_list", "=", "[", "]", "path", "=", "deepcopy", "(", "root", ")", "for", "i", "in", "range", "(", "len", "(", "old_list", ")", ",", "len", "(", "new_list", ")", ")", ":", "append_list", ".", "append", "(", "new_list", "[", "i", "]", ")", "change_list", ".", "append", "(", "{", "'action'", ":", "'APPEND'", ",", "'value'", ":", "append_list", ",", "'path'", ":", "path", "}", ")", "else", ":", "same_len", "=", "len", "(", "new_list", ")", "for", "i", "in", "range", "(", "0", ",", "same_len", ")", ":", "new_path", "=", "deepcopy", "(", "root", ")", "new_path", ".", "append", "(", "i", ")", "if", "new_list", "[", "i", "]", ".", "__class__", "!=", "old_list", "[", "i", "]", ".", "__class__", ":", "change_list", ".", "append", "(", "{", "'action'", ":", "'UPDATE'", ",", "'value'", ":", "new_list", "[", "i", "]", ",", "'path'", ":", "new_path", "}", ")", "elif", "isinstance", "(", "new_list", "[", "i", "]", ",", "dict", ")", ":", "_compare_dict", "(", "new_list", "[", "i", "]", ",", "old_list", "[", "i", "]", ",", "change_list", ",", "new_path", ")", "elif", "isinstance", "(", "new_list", "[", "i", "]", ",", "list", ")", ":", "_compare_list", "(", "new_list", "[", "i", "]", ",", "old_list", "[", "i", "]", ",", "change_list", ",", "new_path", ")", "elif", "isinstance", "(", "new_list", "[", "i", "]", ",", "set", ")", ":", "_compare_set", "(", "new_list", "[", "i", "]", ",", "old_list", "[", "i", "]", ",", "change_list", ",", "new_path", ")", "elif", "new_list", "[", "i", "]", "!=", "old_list", "[", "i", "]", ":", "change_list", ".", "append", "(", "{", "'action'", ":", "'UPDATE'", ",", "'value'", ":", "new_list", "[", "i", "]", ",", "'path'", ":", "new_path", "}", ")", "return", "change_list" ]
a method for recursively listing changes made to a list :param new_list: list with new values :param old_list: list with old values :param change_list: list of differences between old and new :param root: list with record of path to the root of the main object :return: list of differences between old and new
[ "a", "method", "for", "recursively", "listing", "changes", "made", "to", "a", "list" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/parsing/comparison.py#L80-L121
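A sketch of the list diff above; a longer new list yields a single APPEND entry before the per-index comparison runs, and the None defaults again require explicit empty lists:
changes = _compare_list([1, 2, 3, 4], [1, 9], change_list=[], root=[])
# [{'action': 'APPEND', 'value': [3, 4], 'path': []},
#  {'action': 'UPDATE', 'value': 2, 'path': [1]}]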
245,097
collectiveacuity/labPack
labpack/parsing/comparison.py
_compare_set
def _compare_set(new_set, old_set, change_list, root): ''' a method for listing changes made to a set :param new_set: set with new values :param old_set: set with old values :param change_list: list of differences between old and new :param root: list with record of path to the root of the main object :return: list of differences between old and new ''' from copy import deepcopy path = deepcopy(root) missing_items = old_set - new_set extra_items = new_set - old_set for item in missing_items: change_list.append({'action': 'REMOVE', 'key': None, 'value': item, 'path': path}) for item in extra_items: change_list.append({'action': 'ADD', 'key': None, 'value': item, 'path': path}) return change_list
python
def _compare_set(new_set, old_set, change_list, root): ''' a method for listing changes made to a set :param new_set: set with new values :param old_set: set with old values :param change_list: list of differences between old and new :param root: list with record of path to the root of the main object :return: list of differences between old and new ''' from copy import deepcopy path = deepcopy(root) missing_items = old_set - new_set extra_items = new_set - old_set for item in missing_items: change_list.append({'action': 'REMOVE', 'key': None, 'value': item, 'path': path}) for item in extra_items: change_list.append({'action': 'ADD', 'key': None, 'value': item, 'path': path}) return change_list
[ "def", "_compare_set", "(", "new_set", ",", "old_set", ",", "change_list", ",", "root", ")", ":", "from", "copy", "import", "deepcopy", "path", "=", "deepcopy", "(", "root", ")", "missing_items", "=", "old_set", "-", "new_set", "extra_items", "=", "new_set", "-", "old_set", "for", "item", "in", "missing_items", ":", "change_list", ".", "append", "(", "{", "'action'", ":", "'REMOVE'", ",", "'key'", ":", "None", ",", "'value'", ":", "item", ",", "'path'", ":", "path", "}", ")", "for", "item", "in", "extra_items", ":", "change_list", ".", "append", "(", "{", "'action'", ":", "'ADD'", ",", "'key'", ":", "None", ",", "'value'", ":", "item", ",", "'path'", ":", "path", "}", ")", "return", "change_list" ]
a method for listing changes made to a set :param new_set: set with new values :param old_set: set with old values :param change_list: list of differences between old and new :param root: list with record of path to the root of the main object :return: list of differences between old and new
[ "a", "method", "for", "list", "changes", "made", "to", "a", "set" ]
52949ece35e72e3cc308f54d9ffa6bfbd96805b8
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/parsing/comparison.py#L123-L143
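A sketch of the set diff above; unlike the dict/list helpers, change_list and root are required positional arguments here, and each entry carries an extra 'key': None field:
changes = _compare_set({1, 2}, {2, 3}, [], [])
# [{'action': 'REMOVE', 'key': None, 'value': 3, 'path': []},
#  {'action': 'ADD', 'key': None, 'value': 1, 'path': []}]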
245,098
dangunter/smoqe
smoqe/query.py
to_mongo
def to_mongo(qry): """Transform a simple query with one or more filter expressions into a MongoDB query expression. :param qry: Filter expression(s), see function docstring for details. :type qry: str or list :return: MongoDB query :rtype: dict :raises: BadExpression, if one of the input expressions cannot be parsed Expressions have three parts, called in order ``field``, ``operator``, and ``value``. - `field` is the name of a field in a MongoDB document - `value` is the value to compare against: * numeric * string, you MUST use 'single' or "double" quotes * boolean: true, false - `operator` is a comparison operator: * inequalities: >, <, =, <=, >=, != * PCRE regular expression: ~ * data type: int, float, string, or bool * exists: boolean (true/false) whether field exists in record * size: for array fields, an inequality for the array size, given as a suffix to the operator: size>, size< Multiple expressions can be a single string, or a list. In either case, the form is a "disjunction of conjunctions". In the string form: * "and" joins expressions into groups * "or" joins one or more expression groups In the list form: * The inner list is a group of "and"ed expressions * The outer list "or"s the expression groups together. In the string form, parentheses before or after the "or"ed expression groups [note: even non-sensical ones like '((('], are ignored. So these can be used to clarify the groupings. **Examples** Two sets of filters, return records where either is true: >>> to_mongo('(a > 3 and b = "hello") or (c > 1 and d = "goodbye")') {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]} Same as previous, but without parentheses: >>> to_mongo('a > 3 and b = "hello" or c > 1 and d = "goodbye"') {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]} Same as previous, but using lists rather than "and"/"or": >>> to_mongo([['a > 3', 'b = "hello"'], ['c > 1', 'd = "goodbye"']]) {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]} """ rev = False # filters, not constraints # special case for empty string/list if qry == "" or qry == []: return {} # break input into groups of filters unpar = lambda s: s.strip().strip('()') if isinstance(qry, str): groups = [] if _TOK_OR in qry: groups = [unpar(g).split(_TOK_AND) for g in qry.split(_TOK_OR)] else: groups = [unpar(qry).split(_TOK_AND)] else: if isinstance(qry[0], list) or isinstance(qry[0], tuple): groups = qry else: groups = [qry] # generate mongodb queries for each filter group filters = [] for filter_exprs in groups: mq = MongoQuery() for e in filter_exprs: try: e = unpar(e) except AttributeError: raise BadExpression(e, "expected string, got '{t}'".format(t=type(e))) try: constraint = Constraint(*parse_expr(e)) except ValueError as err: raise BadExpression(e, err) clause = MongoClause(constraint, rev=rev) mq.add_clause(clause) filters.append(mq.to_mongo(rev)) # combine together filters, or strip down the one filter if len(filters) > 1: result = {'$or': filters} else: result = filters[0] return result
python
def to_mongo(qry): """Transform a simple query with one or more filter expressions into a MongoDB query expression. :param qry: Filter expression(s), see function docstring for details. :type qry: str or list :return: MongoDB query :rtype: dict :raises: BadExpression, if one of the input expressions cannot be parsed Expressions have three parts, called in order ``field``, ``operator``, and ``value``. - `field` is the name of a field in a MongoDB document - `value` is the value to compare against: * numeric * string, you MUST use 'single' or "double" quotes * boolean: true, false - `operator` is a comparison operator: * inequalities: >, <, =, <=, >=, != * PCRE regular expression: ~ * data type: int, float, string, or bool * exists: boolean (true/false) whether field exists in record * size: for array fields, an inequality for the array size, given as a suffix to the operator: size>, size< Multiple expressions can be a single string, or a list. In either case, the form is a "disjunction of conjunctions". In the string form: * "and" joins expressions into groups * "or" joins one or more expression groups In the list form: * The inner list is a group of "and"ed expressions * The outer list "or"s the expression groups together. In the string form, parentheses before or after the "or"ed expression groups [note: even non-sensical ones like '((('], are ignored. So these can be used to clarify the groupings. **Examples** Two sets of filters, return records where either is true: >>> to_mongo('(a > 3 and b = "hello") or (c > 1 and d = "goodbye")') {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]} Same as previous, but without parentheses: >>> to_mongo('a > 3 and b = "hello" or c > 1 and d = "goodbye"') {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]} Same as previous, but using lists rather than "and"/"or": >>> to_mongo([['a > 3', 'b = "hello"'], ['c > 1', 'd = "goodbye"']]) {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]} """ rev = False # filters, not constraints # special case for empty string/list if qry == "" or qry == []: return {} # break input into groups of filters unpar = lambda s: s.strip().strip('()') if isinstance(qry, str): groups = [] if _TOK_OR in qry: groups = [unpar(g).split(_TOK_AND) for g in qry.split(_TOK_OR)] else: groups = [unpar(qry).split(_TOK_AND)] else: if isinstance(qry[0], list) or isinstance(qry[0], tuple): groups = qry else: groups = [qry] # generate mongodb queries for each filter group filters = [] for filter_exprs in groups: mq = MongoQuery() for e in filter_exprs: try: e = unpar(e) except AttributeError: raise BadExpression(e, "expected string, got '{t}'".format(t=type(e))) try: constraint = Constraint(*parse_expr(e)) except ValueError as err: raise BadExpression(e, err) clause = MongoClause(constraint, rev=rev) mq.add_clause(clause) filters.append(mq.to_mongo(rev)) # combine together filters, or strip down the one filter if len(filters) > 1: result = {'$or': filters} else: result = filters[0] return result
[ "def", "to_mongo", "(", "qry", ")", ":", "rev", "=", "False", "# filters, not constraints", "# special case for empty string/list", "if", "qry", "==", "\"\"", "or", "qry", "==", "[", "]", ":", "return", "{", "}", "# break input into groups of filters", "unpar", "=", "lambda", "s", ":", "s", ".", "strip", "(", ")", ".", "strip", "(", "'()'", ")", "if", "isinstance", "(", "qry", ",", "str", ")", ":", "groups", "=", "[", "]", "if", "_TOK_OR", "in", "qry", ":", "groups", "=", "[", "unpar", "(", "g", ")", ".", "split", "(", "_TOK_AND", ")", "for", "g", "in", "qry", ".", "split", "(", "_TOK_OR", ")", "]", "else", ":", "groups", "=", "[", "unpar", "(", "qry", ")", ".", "split", "(", "_TOK_AND", ")", "]", "else", ":", "if", "isinstance", "(", "qry", "[", "0", "]", ",", "list", ")", "or", "isinstance", "(", "qry", "[", "0", "]", ",", "tuple", ")", ":", "groups", "=", "qry", "else", ":", "groups", "=", "[", "qry", "]", "# generate mongodb queries for each filter group", "filters", "=", "[", "]", "for", "filter_exprs", "in", "groups", ":", "mq", "=", "MongoQuery", "(", ")", "for", "e", "in", "filter_exprs", ":", "try", ":", "e", "=", "unpar", "(", "e", ")", "except", "AttributeError", ":", "raise", "BadExpression", "(", "e", ",", "\"expected string, got '{t}'\"", ".", "format", "(", "t", "=", "type", "(", "e", ")", ")", ")", "try", ":", "constraint", "=", "Constraint", "(", "*", "parse_expr", "(", "e", ")", ")", "except", "ValueError", "as", "err", ":", "raise", "BadExpression", "(", "e", ",", "err", ")", "clause", "=", "MongoClause", "(", "constraint", ",", "rev", "=", "rev", ")", "mq", ".", "add_clause", "(", "clause", ")", "filters", ".", "append", "(", "mq", ".", "to_mongo", "(", "rev", ")", ")", "# combine together filters, or strip down the one filter", "if", "len", "(", "filters", ")", ">", "1", ":", "result", "=", "{", "'$or'", ":", "filters", "}", "else", ":", "result", "=", "filters", "[", "0", "]", "return", "result" ]
Transform a simple query with one or more filter expressions into a MongoDB query expression. :param qry: Filter expression(s), see function docstring for details. :type qry: str or list :return: MongoDB query :rtype: dict :raises: BadExpression, if one of the input expressions cannot be parsed Expressions have three parts, called in order ``field``, ``operator``, and ``value``. - `field` is the name of a field in a MongoDB document - `value` is the value to compare against: * numeric * string, you MUST use 'single' or "double" quotes * boolean: true, false - `operator` is a comparison operator: * inequalities: >, <, =, <=, >=, != * PCRE regular expression: ~ * data type: int, float, string, or bool * exists: boolean (true/false) whether field exists in record * size: for array fields, an inequality for the array size, given as a suffix to the operator: size>, size< Multiple expressions can be a single string, or a list. In either case, the form is a "disjunction of conjunctions". In the string form: * "and" joins expressions into groups * "or" joins one or more expression groups In the list form: * The inner list is a group of "and"ed expressions * The outer list "or"s the expression groups together. In the string form, parentheses before or after the "or"ed expression groups [note: even non-sensical ones like '((('], are ignored. So these can be used to clarify the groupings. **Examples** Two sets of filters, return records where either is true: >>> to_mongo('(a > 3 and b = "hello") or (c > 1 and d = "goodbye")') {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]} Same as previous, but without parentheses: >>> to_mongo('a > 3 and b = "hello" or c > 1 and d = "goodbye"') {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]} Same as previous, but using lists rather than "and"/"or": >>> to_mongo([['a > 3', 'b = "hello"'], ['c > 1', 'd = "goodbye"']]) {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]}
[ "Transform", "a", "simple", "query", "with", "one", "or", "more", "filter", "expressions", "into", "a", "MongoDB", "query", "expression", "." ]
70aa8ec1e9df875b9d21c71cbded95c595fe2aad
https://github.com/dangunter/smoqe/blob/70aa8ec1e9df875b9d21c71cbded95c595fe2aad/smoqe/query.py#L28-L126
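A short sketch built on the docstring's own examples: a single conjunction comes back without the '$or' wrapper, and an unparseable filter is assumed to surface as BadExpression (on the assumption that relation_re rejects it):
filt = to_mongo('a > 3 and b = "hello"')
# {'a': {'$gt': 3}, 'b': 'hello'}
try:
    to_mongo(['not an expression'])
except BadExpression:
    pass  # parse_expr's ValueError is re-raised with the offending expression attached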
245,099
dangunter/smoqe
smoqe/query.py
parse_expr
def parse_expr(e): """Parse a single constraint expression. Legal expressions are defined by the regular expression `relation_re`. :param e: Expression :type e: str :return: Tuple of field, operator, and value :rtype: tuple """ m = relation_re.match(e) if m is None: raise ValueError("error parsing expression '{}'".format(e)) field, op, val = m.groups() # Try different types try: # Integer val_int = int(val) val = val_int except ValueError: try: # Float val_float = float(val) val = val_float except ValueError: try: # Boolean val = {'true': True, 'false': False}[val.lower()] except KeyError: # String if re.match(r'".*"|\'.*\'', val): # strip quotes from strings val = val[1:-1] return field, op, val
python
def parse_expr(e): """Parse a single constraint expression. Legal expressions are defined by the regular expression `relation_re`. :param e: Expression :type e: str :return: Tuple of field, operator, and value :rtype: tuple """ m = relation_re.match(e) if m is None: raise ValueError("error parsing expression '{}'".format(e)) field, op, val = m.groups() # Try different types try: # Integer val_int = int(val) val = val_int except ValueError: try: # Float val_float = float(val) val = val_float except ValueError: try: # Boolean val = {'true': True, 'false': False}[val.lower()] except KeyError: # String if re.match(r'".*"|\'.*\'', val): # strip quotes from strings val = val[1:-1] return field, op, val
[ "def", "parse_expr", "(", "e", ")", ":", "m", "=", "relation_re", ".", "match", "(", "e", ")", "if", "m", "is", "None", ":", "raise", "ValueError", "(", "\"error parsing expression '{}'\"", ".", "format", "(", "e", ")", ")", "field", ",", "op", ",", "val", "=", "m", ".", "groups", "(", ")", "# Try different types", "try", ":", "# Integer", "val_int", "=", "int", "(", "val", ")", "val", "=", "val_int", "except", "ValueError", ":", "try", ":", "# Float", "val_float", "=", "float", "(", "val", ")", "val", "=", "val_float", "except", "ValueError", ":", "try", ":", "# Boolean", "val", "=", "{", "'true'", ":", "True", ",", "'false'", ":", "False", "}", "[", "val", ".", "lower", "(", ")", "]", "except", "KeyError", ":", "# String", "if", "re", ".", "match", "(", "r'\".*\"|\\'.*\\''", ",", "val", ")", ":", "# strip quotes from strings", "val", "=", "val", "[", "1", ":", "-", "1", "]", "return", "field", ",", "op", ",", "val" ]
Parse a single constraint expression. Legal expressions are defined by the regular expression `relation_re`. :param e: Expression :type e: str :return: Tuple of field, operator, and value :rtype: tuple
[ "Parse", "a", "single", "constraint", "expression", "." ]
70aa8ec1e9df875b9d21c71cbded95c595fe2aad
https://github.com/dangunter/smoqe/blob/70aa8ec1e9df875b9d21c71cbded95c595fe2aad/smoqe/query.py#L144-L177
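A sketch of the value coercion in parse_expr; the exact tokens relation_re accepts are not shown in this record, so these inputs assume it splits on whitespace around the operator:
parse_expr('count >= 10')    # ('count', '>=', 10)      int coercion first
parse_expr('ratio > 0.5')    # ('ratio', '>', 0.5)      float fallback
parse_expr('active = true')  # ('active', '=', True)    boolean lookup
parse_expr("name = 'bob'")   # ('name', '=', 'bob')     surrounding quotes stripped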