Dataset columns and types:

    id                int32    values 0 to 252k
    repo              string   length 7 to 55
    path              string   length 4 to 127
    func_name         string   length 1 to 88
    original_string   string   length 75 to 19.8k
    language          string   1 distinct value
    code              string   length 75 to 19.8k
    code_tokens       list
    docstring         string   length 3 to 17.3k
    docstring_tokens  list
    sha               string   length 40 to 40
    url               string   length 87 to 242
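Each row below is one record with these twelve fields, in this order. As a rough sketch of how such records might be inspected once loaded into Python (the `records` list and the loading step are assumed here, not part of the dataset itself):

    # Minimal sketch, assuming `records` is a list of dicts keyed by the
    # column names above; the variable name and loading step are made up.
    def summarize(record):
        # One-line summary built only from fields defined in the schema.
        return "%s  (%s/%s, %d code tokens)" % (
            record["func_name"],
            record["repo"],
            record["path"],
            len(record["code_tokens"]),
        )

    for record in records[:3]:
        print(summarize(record))
        print(record["url"])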
243,900
TC01/calcpkg
calcrepo/repo.py
CalcRepository.searchIndex
def searchIndex(self, printData=True):
    """Search the index with all the repo's specified parameters"""
    backupValue = copy.deepcopy(self.output.printData)
    self.output.printData = printData
    self.data = self.index.search(self.searchString, self.category, self.math, self.game, self.searchFiles, self.extension)
    self.output.printData = backupValue
    return self.data
python
def searchIndex(self, printData=True):
    """Search the index with all the repo's specified parameters"""
    backupValue = copy.deepcopy(self.output.printData)
    self.output.printData = printData
    self.data = self.index.search(self.searchString, self.category, self.math, self.game, self.searchFiles, self.extension)
    self.output.printData = backupValue
    return self.data
[ "def", "searchIndex", "(", "self", ",", "printData", "=", "True", ")", ":", "backupValue", "=", "copy", ".", "deepcopy", "(", "self", ".", "output", ".", "printData", ")", "self", ".", "output", ".", "printData", "=", "printData", "self", ".", "data", "=", "self", ".", "index", ".", "search", "(", "self", ".", "searchString", ",", "self", ".", "category", ",", "self", ".", "math", ",", "self", ".", "game", ",", "self", ".", "searchFiles", ",", "self", ".", "extension", ")", "self", ".", "output", ".", "printData", "=", "backupValue", "return", "self", ".", "data" ]
Search the index with all the repo's specified parameters
[ "Search", "the", "index", "with", "all", "the", "repo", "s", "specified", "parameters" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L53-L59
243,901
TC01/calcpkg
calcrepo/repo.py
CalcRepository.countIndex
def countIndex(self):
    """A wrapper for the count function in calcrepo.index; count using specified parameters"""
    self.data = self.index.count(self.searchString, self.category, self.math, self.game, self.searchFiles, self.extension)
python
def countIndex(self):
    """A wrapper for the count function in calcrepo.index; count using specified parameters"""
    self.data = self.index.count(self.searchString, self.category, self.math, self.game, self.searchFiles, self.extension)
[ "def", "countIndex", "(", "self", ")", ":", "self", ".", "data", "=", "self", ".", "index", ".", "count", "(", "self", ".", "searchString", ",", "self", ".", "category", ",", "self", ".", "math", ",", "self", ".", "game", ",", "self", ".", "searchFiles", ",", "self", ".", "extension", ")" ]
A wrapper for the count function in calcrepo.index; count using specified parameters
[ "A", "wrapper", "for", "the", "count", "function", "in", "calcrepo", ".", "index", ";", "count", "using", "specified", "parameters" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L61-L63
243,902
TC01/calcpkg
calcrepo/repo.py
CalcRepository.getDownloadUrls
def getDownloadUrls(self):
    """Return a list of the urls to download from"""
    data = self.searchIndex(False)
    fileUrls = []
    for datum in data:
        fileUrl = self.formatDownloadUrl(datum[0])
        fileUrls.append(fileUrl)
    return fileUrls
python
def getDownloadUrls(self):
    """Return a list of the urls to download from"""
    data = self.searchIndex(False)
    fileUrls = []
    for datum in data:
        fileUrl = self.formatDownloadUrl(datum[0])
        fileUrls.append(fileUrl)
    return fileUrls
[ "def", "getDownloadUrls", "(", "self", ")", ":", "data", "=", "self", ".", "searchIndex", "(", "False", ")", "fileUrls", "=", "[", "]", "for", "datum", "in", "data", ":", "fileUrl", "=", "self", ".", "formatDownloadUrl", "(", "datum", "[", "0", "]", ")", "fileUrls", ".", "append", "(", "fileUrl", ")", "return", "fileUrls" ]
Return a list of the urls to download from
[ "Return", "a", "list", "of", "the", "urls", "to", "download", "from" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L65-L72
243,903
TC01/calcpkg
calcrepo/repo.py
CalcRepository.getFileInfos
def getFileInfos(self):
    """Return a list of FileInfo objects"""
    data = self.searchIndex(False)
    self.data = data
    self.printd(" ")
    fileInfos = []
    for datum in data:
        try:
            fileInfo = self.getFileInfo(datum[0], datum[1])
            fileInfos.append(fileInfo)
        except NotImplementedError:
            self.printd("Error: the info command is not supported for " + self.name + ".")
            return []
    return fileInfos
python
def getFileInfos(self):
    """Return a list of FileInfo objects"""
    data = self.searchIndex(False)
    self.data = data
    self.printd(" ")
    fileInfos = []
    for datum in data:
        try:
            fileInfo = self.getFileInfo(datum[0], datum[1])
            fileInfos.append(fileInfo)
        except NotImplementedError:
            self.printd("Error: the info command is not supported for " + self.name + ".")
            return []
    return fileInfos
[ "def", "getFileInfos", "(", "self", ")", ":", "data", "=", "self", ".", "searchIndex", "(", "False", ")", "self", ".", "data", "=", "data", "self", ".", "printd", "(", "\" \"", ")", "fileInfos", "=", "[", "]", "for", "datum", "in", "data", ":", "try", ":", "fileInfo", "=", "self", ".", "getFileInfo", "(", "datum", "[", "0", "]", ",", "datum", "[", "1", "]", ")", "fileInfos", ".", "append", "(", "fileInfo", ")", "except", "NotImplementedError", ":", "self", ".", "printd", "(", "\"Error: the info command is not supported for \"", "+", "self", ".", "name", "+", "\".\"", ")", "return", "[", "]", "return", "fileInfos" ]
Return a list of FileInfo objects
[ "Return", "a", "list", "of", "FileInfo", "objects" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L74-L87
243,904
TC01/calcpkg
calcrepo/repo.py
CalcRepository.downloadFiles
def downloadFiles(self, prompt=True, extract=False):
    """Download files from the repository"""
    #First, get the download urls
    data = self.data
    downloadUrls = self.getDownloadUrls()

    #Then, confirm the user wants to do this
    if prompt:
        confirm = raw_input("Download files [Y/N]? ")
        if confirm.lower() != 'y':
            self.printd("Operation aborted by user input")
            return

    #Now, if they still do, do all this stuff:
    counter = -1
    for datum in data:
        counter += 1
        try:
            download = downloadUrls[counter]
        except:
            pass

        # Download the file; fix our user agent
        self.printd("Downloading " + datum[0] + " from " + download)
        headers = { 'User-Agent' : 'calcpkg/2.0' }
        request = urllib2.Request(download, None, headers)
        fileData = urllib2.urlopen(request).read()

        # Now, process the downloaded file
        dowName = datum[0]

        # Use a helper function to remove /pub, /files
        dowName = util.removeRootFromName(dowName)
        dowName = dowName[1:]
        dowName = dowName.replace('/', '-')
        dowName = self.downloadDir + dowName
        try:
            downloaded = open(dowName, 'wb')
        except:
            os.remove(dowName)
        downloaded.write(fileData)
        downloaded.close()
        self.printd("Download complete! Wrote file " + dowName + "\n")

        #Extract them if told to do so
        if extract:
            extractType = ""
            if '.zip' in dowName:
                extractType = "zip"
            elif '.tar' in dowName:
                extractType = "tar"
            specType = ""
            if '.bz2' in dowName:
                specType = ":bz2"
            elif ".gz" in dowName:
                specType = ":gz"
            elif ".tgz" in dowName:
                extractType = "tar"
                specType = ":gz"
            if extractType != "":
                self.printd("Extracting file " + dowName + ", creating directory for extracted files")
                dirName, a, ending = dowName.partition('.')
                dirName = dirName + '-' + ending
                try:
                    os.mkdir(dirName)
                except:
                    pass
                if extractType == "zip":
                    archive = zipfile.ZipFile(dowName, 'r')
                elif extractType == "tar":
                    archive = tarfile.open(dowName, "r" + specType)
                else:
                    self.printd("An unknown error has occured!")
                    return
                archive.extractall(dirName)
                self.printd("All files in archive extracted to " + dirName)
                os.remove(dowName)
                self.printd("The archive file " + dowName + " has been deleted!\n")
python
def downloadFiles(self, prompt=True, extract=False):
    """Download files from the repository"""
    #First, get the download urls
    data = self.data
    downloadUrls = self.getDownloadUrls()

    #Then, confirm the user wants to do this
    if prompt:
        confirm = raw_input("Download files [Y/N]? ")
        if confirm.lower() != 'y':
            self.printd("Operation aborted by user input")
            return

    #Now, if they still do, do all this stuff:
    counter = -1
    for datum in data:
        counter += 1
        try:
            download = downloadUrls[counter]
        except:
            pass

        # Download the file; fix our user agent
        self.printd("Downloading " + datum[0] + " from " + download)
        headers = { 'User-Agent' : 'calcpkg/2.0' }
        request = urllib2.Request(download, None, headers)
        fileData = urllib2.urlopen(request).read()

        # Now, process the downloaded file
        dowName = datum[0]

        # Use a helper function to remove /pub, /files
        dowName = util.removeRootFromName(dowName)
        dowName = dowName[1:]
        dowName = dowName.replace('/', '-')
        dowName = self.downloadDir + dowName
        try:
            downloaded = open(dowName, 'wb')
        except:
            os.remove(dowName)
        downloaded.write(fileData)
        downloaded.close()
        self.printd("Download complete! Wrote file " + dowName + "\n")

        #Extract them if told to do so
        if extract:
            extractType = ""
            if '.zip' in dowName:
                extractType = "zip"
            elif '.tar' in dowName:
                extractType = "tar"
            specType = ""
            if '.bz2' in dowName:
                specType = ":bz2"
            elif ".gz" in dowName:
                specType = ":gz"
            elif ".tgz" in dowName:
                extractType = "tar"
                specType = ":gz"
            if extractType != "":
                self.printd("Extracting file " + dowName + ", creating directory for extracted files")
                dirName, a, ending = dowName.partition('.')
                dirName = dirName + '-' + ending
                try:
                    os.mkdir(dirName)
                except:
                    pass
                if extractType == "zip":
                    archive = zipfile.ZipFile(dowName, 'r')
                elif extractType == "tar":
                    archive = tarfile.open(dowName, "r" + specType)
                else:
                    self.printd("An unknown error has occured!")
                    return
                archive.extractall(dirName)
                self.printd("All files in archive extracted to " + dirName)
                os.remove(dowName)
                self.printd("The archive file " + dowName + " has been deleted!\n")
[ "def", "downloadFiles", "(", "self", ",", "prompt", "=", "True", ",", "extract", "=", "False", ")", ":", "#First, get the download urls", "data", "=", "self", ".", "data", "downloadUrls", "=", "self", ".", "getDownloadUrls", "(", ")", "#Then, confirm the user wants to do this", "if", "prompt", ":", "confirm", "=", "raw_input", "(", "\"Download files [Y/N]? \"", ")", "if", "confirm", ".", "lower", "(", ")", "!=", "'y'", ":", "self", ".", "printd", "(", "\"Operation aborted by user input\"", ")", "return", "#Now, if they still do, do all this stuff:", "counter", "=", "-", "1", "for", "datum", "in", "data", ":", "counter", "+=", "1", "try", ":", "download", "=", "downloadUrls", "[", "counter", "]", "except", ":", "pass", "# Download the file; fix our user agent", "self", ".", "printd", "(", "\"Downloading \"", "+", "datum", "[", "0", "]", "+", "\" from \"", "+", "download", ")", "headers", "=", "{", "'User-Agent'", ":", "'calcpkg/2.0'", "}", "request", "=", "urllib2", ".", "Request", "(", "download", ",", "None", ",", "headers", ")", "fileData", "=", "urllib2", ".", "urlopen", "(", "request", ")", ".", "read", "(", ")", "# Now, process the downloaded file", "dowName", "=", "datum", "[", "0", "]", "# Use a helper function to remove /pub, /files", "dowName", "=", "util", ".", "removeRootFromName", "(", "dowName", ")", "dowName", "=", "dowName", "[", "1", ":", "]", "dowName", "=", "dowName", ".", "replace", "(", "'/'", ",", "'-'", ")", "dowName", "=", "self", ".", "downloadDir", "+", "dowName", "try", ":", "downloaded", "=", "open", "(", "dowName", ",", "'wb'", ")", "except", ":", "os", ".", "remove", "(", "dowName", ")", "downloaded", ".", "write", "(", "fileData", ")", "downloaded", ".", "close", "(", ")", "self", ".", "printd", "(", "\"Download complete! Wrote file \"", "+", "dowName", "+", "\"\\n\"", ")", "#Extract them if told to do so", "if", "extract", ":", "extractType", "=", "\"\"", "if", "'.zip'", "in", "dowName", ":", "extractType", "=", "\"zip\"", "elif", "'.tar'", "in", "dowName", ":", "extractType", "=", "\"tar\"", "specType", "=", "\"\"", "if", "'.bz2'", "in", "dowName", ":", "specType", "=", "\":bz2\"", "elif", "\".gz\"", "in", "dowName", ":", "specType", "=", "\":gz\"", "elif", "\".tgz\"", "in", "dowName", ":", "extractType", "=", "\"tar\"", "specType", "=", "\":gz\"", "if", "extractType", "!=", "\"\"", ":", "self", ".", "printd", "(", "\"Extracting file \"", "+", "dowName", "+", "\", creating directory for extracted files\"", ")", "dirName", ",", "a", ",", "ending", "=", "dowName", ".", "partition", "(", "'.'", ")", "dirName", "=", "dirName", "+", "'-'", "+", "ending", "try", ":", "os", ".", "mkdir", "(", "dirName", ")", "except", ":", "pass", "if", "extractType", "==", "\"zip\"", ":", "archive", "=", "zipfile", ".", "ZipFile", "(", "dowName", ",", "'r'", ")", "elif", "extractType", "==", "\"tar\"", ":", "archive", "=", "tarfile", ".", "open", "(", "dowName", ",", "\"r\"", "+", "specType", ")", "else", ":", "self", ".", "printd", "(", "\"An unknown error has occured!\"", ")", "return", "archive", ".", "extractall", "(", "dirName", ")", "self", ".", "printd", "(", "\"All files in archive extracted to \"", "+", "dirName", ")", "os", ".", "remove", "(", "dowName", ")", "self", ".", "printd", "(", "\"The archive file \"", "+", "dowName", "+", "\" has been deleted!\\n\"", ")" ]
Download files from the repository
[ "Download", "files", "from", "the", "repository" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L89-L166
243,905
TC01/calcpkg
calcrepo/repo.py
CalcRepository.downloadFileFromUrl
def downloadFileFromUrl(self, url):
    """Given a URL, download the specified file"""
    fullurl = self.baseUrl + url
    try:
        urlobj = urllib2.urlopen(fullurl)
        contents = urlobj.read()
    except urllib2.HTTPError, e:
        self.printd("HTTP error:", e.code, url)
        return None
    except urllib2.URLError, e:
        self.printd("URL error:", e.code, url)
        return None
    self.printd("Fetched '%s' (size %d bytes)" % (fullurl, len(contents)))
    return contents
python
def downloadFileFromUrl(self, url):
    """Given a URL, download the specified file"""
    fullurl = self.baseUrl + url
    try:
        urlobj = urllib2.urlopen(fullurl)
        contents = urlobj.read()
    except urllib2.HTTPError, e:
        self.printd("HTTP error:", e.code, url)
        return None
    except urllib2.URLError, e:
        self.printd("URL error:", e.code, url)
        return None
    self.printd("Fetched '%s' (size %d bytes)" % (fullurl, len(contents)))
    return contents
[ "def", "downloadFileFromUrl", "(", "self", ",", "url", ")", ":", "fullurl", "=", "self", ".", "baseUrl", "+", "url", "try", ":", "urlobj", "=", "urllib2", ".", "urlopen", "(", "fullurl", ")", "contents", "=", "urlobj", ".", "read", "(", ")", "except", "urllib2", ".", "HTTPError", ",", "e", ":", "self", ".", "printd", "(", "\"HTTP error:\"", ",", "e", ".", "code", ",", "url", ")", "return", "None", "except", "urllib2", ".", "URLError", ",", "e", ":", "self", ".", "printd", "(", "\"URL error:\"", ",", "e", ".", "code", ",", "url", ")", "return", "None", "self", ".", "printd", "(", "\"Fetched '%s' (size %d bytes)\"", "%", "(", "fullurl", ",", "len", "(", "contents", ")", ")", ")", "return", "contents" ]
Given a URL, download the specified file
[ "Given", "a", "URL", "download", "the", "specified", "file" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L185-L198
243,906
TC01/calcpkg
calcrepo/repo.py
CalcRepository.openIndex
def openIndex(self, filename, description):
    """Attempt to delete and recreate an index, returns open file object or None."""
    try:
        os.remove(filename)
        self.printd(" Deleted old " + description)
    except:
        self.printd(" No " + description + " found")

    # Now, attempt to open a new index
    try:
        files = open(filename, 'wt')
    except:
        self.printd("Error: Unable to create file " + filename + " in current folder. Quitting.")
        return None
    return files
python
def openIndex(self, filename, description):
    """Attempt to delete and recreate an index, returns open file object or None."""
    try:
        os.remove(filename)
        self.printd(" Deleted old " + description)
    except:
        self.printd(" No " + description + " found")

    # Now, attempt to open a new index
    try:
        files = open(filename, 'wt')
    except:
        self.printd("Error: Unable to create file " + filename + " in current folder. Quitting.")
        return None
    return files
[ "def", "openIndex", "(", "self", ",", "filename", ",", "description", ")", ":", "try", ":", "os", ".", "remove", "(", "filename", ")", "self", ".", "printd", "(", "\" Deleted old \"", "+", "description", ")", "except", ":", "self", ".", "printd", "(", "\" No \"", "+", "description", "+", "\" found\"", ")", "# Now, attempt to open a new index", "try", ":", "files", "=", "open", "(", "filename", ",", "'wt'", ")", "except", ":", "self", ".", "printd", "(", "\"Error: Unable to create file \"", "+", "filename", "+", "\" in current folder. Quitting.\"", ")", "return", "None", "return", "files" ]
Attempt to delete and recreate an index, returns open file object or None.
[ "Attempt", "to", "delete", "and", "recreate", "an", "index", "returns", "open", "file", "object", "or", "None", "." ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repo.py#L200-L214
243,907
rikrd/inspire
inspirespeech/htk_model.py
load_model
def load_model(*args):
    """Load an HTK model from one ore more files.

    :param args: Filenames of the model (e.g. macros hmmdefs)
    :return: The model as an OrderedDict()
    """
    text = ''
    for fnm in args:
        text += open(fnm).read()
        text += '\n'

    parser = htk_model_parser.htk_modelParser()
    model = HtkModelSemantics()
    return parser.parse(text,
                        rule_name='model',
                        ignorecase=True,
                        semantics=model,
                        comments_re="\(\*.*?\*\)",
                        trace=False)
python
def load_model(*args):
    """Load an HTK model from one ore more files.

    :param args: Filenames of the model (e.g. macros hmmdefs)
    :return: The model as an OrderedDict()
    """
    text = ''
    for fnm in args:
        text += open(fnm).read()
        text += '\n'

    parser = htk_model_parser.htk_modelParser()
    model = HtkModelSemantics()
    return parser.parse(text,
                        rule_name='model',
                        ignorecase=True,
                        semantics=model,
                        comments_re="\(\*.*?\*\)",
                        trace=False)
[ "def", "load_model", "(", "*", "args", ")", ":", "text", "=", "''", "for", "fnm", "in", "args", ":", "text", "+=", "open", "(", "fnm", ")", ".", "read", "(", ")", "text", "+=", "'\\n'", "parser", "=", "htk_model_parser", ".", "htk_modelParser", "(", ")", "model", "=", "HtkModelSemantics", "(", ")", "return", "parser", ".", "parse", "(", "text", ",", "rule_name", "=", "'model'", ",", "ignorecase", "=", "True", ",", "semantics", "=", "model", ",", "comments_re", "=", "\"\\(\\*.*?\\*\\)\"", ",", "trace", "=", "False", ")" ]
Load an HTK model from one ore more files. :param args: Filenames of the model (e.g. macros hmmdefs) :return: The model as an OrderedDict()
[ "Load", "an", "HTK", "model", "from", "one", "ore", "more", "files", "." ]
e281c0266a9a9633f34ab70f9c3ad58036c19b59
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk_model.py#L85-L103
243,908
rikrd/inspire
inspirespeech/htk_model.py
save_model
def save_model(model, filename):
    """Save the model into a file.

    :param model: HTK model to be saved
    :param filename: File where to save the model
    """
    with open(filename, 'w') as f:
        f.write(serialize_model(model))
python
def save_model(model, filename):
    """Save the model into a file.

    :param model: HTK model to be saved
    :param filename: File where to save the model
    """
    with open(filename, 'w') as f:
        f.write(serialize_model(model))
[ "def", "save_model", "(", "model", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "serialize_model", "(", "model", ")", ")" ]
Save the model into a file. :param model: HTK model to be saved :param filename: File where to save the model
[ "Save", "the", "model", "into", "a", "file", "." ]
e281c0266a9a9633f34ab70f9c3ad58036c19b59
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk_model.py#L306-L313
243,909
rikrd/inspire
inspirespeech/htk_model.py
serialize_model
def serialize_model(model):
    """Serialize the HTK model into a file.

    :param model: Model to be serialized
    """
    result = ''

    # First serialize the macros
    for macro in model['macros']:
        if macro.get('options', None):
            result += '~o '
            for option in macro['options']['definition']:
                result += _serialize_option(option)

        elif macro.get('transition', None):
            result += '~t "{}"\n'.format(macro['transition']['name'])
            result += _serialize_transp(macro['transition']['definition'])

        elif macro.get('variance', None):
            result += '~v "{}"\n'.format(macro['variance']['name'])
            result += _serialize_variance(macro['variance']['definition'])

        elif macro.get('state', None):
            result += '~s "{}"\n'.format(macro['state']['name'])
            result += _serialize_stateinfo(macro['state']['definition'])

        elif macro.get('mean', None):
            result += '~u "{}"\n'.format(macro['mean']['name'])
            result += _serialize_mean(macro['mean']['definition'])

        elif macro.get('duration', None):
            result += '~d "{}"\n'.format(macro['duration']['name'])
            result += _serialize_duration(macro['duration']['definition'])

        else:
            raise NotImplementedError('Cannot serialize {}'.format(macro))

    for hmm in model['hmms']:
        if hmm.get('name', None) is not None:
            result += '~h "{}"\n'.format(hmm['name'])

        result += _serialize_hmm(hmm['definition'])

    return result
python
def serialize_model(model):
    """Serialize the HTK model into a file.

    :param model: Model to be serialized
    """
    result = ''

    # First serialize the macros
    for macro in model['macros']:
        if macro.get('options', None):
            result += '~o '
            for option in macro['options']['definition']:
                result += _serialize_option(option)

        elif macro.get('transition', None):
            result += '~t "{}"\n'.format(macro['transition']['name'])
            result += _serialize_transp(macro['transition']['definition'])

        elif macro.get('variance', None):
            result += '~v "{}"\n'.format(macro['variance']['name'])
            result += _serialize_variance(macro['variance']['definition'])

        elif macro.get('state', None):
            result += '~s "{}"\n'.format(macro['state']['name'])
            result += _serialize_stateinfo(macro['state']['definition'])

        elif macro.get('mean', None):
            result += '~u "{}"\n'.format(macro['mean']['name'])
            result += _serialize_mean(macro['mean']['definition'])

        elif macro.get('duration', None):
            result += '~d "{}"\n'.format(macro['duration']['name'])
            result += _serialize_duration(macro['duration']['definition'])

        else:
            raise NotImplementedError('Cannot serialize {}'.format(macro))

    for hmm in model['hmms']:
        if hmm.get('name', None) is not None:
            result += '~h "{}"\n'.format(hmm['name'])

        result += _serialize_hmm(hmm['definition'])

    return result
[ "def", "serialize_model", "(", "model", ")", ":", "result", "=", "''", "# First serialize the macros", "for", "macro", "in", "model", "[", "'macros'", "]", ":", "if", "macro", ".", "get", "(", "'options'", ",", "None", ")", ":", "result", "+=", "'~o '", "for", "option", "in", "macro", "[", "'options'", "]", "[", "'definition'", "]", ":", "result", "+=", "_serialize_option", "(", "option", ")", "elif", "macro", ".", "get", "(", "'transition'", ",", "None", ")", ":", "result", "+=", "'~t \"{}\"\\n'", ".", "format", "(", "macro", "[", "'transition'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_transp", "(", "macro", "[", "'transition'", "]", "[", "'definition'", "]", ")", "elif", "macro", ".", "get", "(", "'variance'", ",", "None", ")", ":", "result", "+=", "'~v \"{}\"\\n'", ".", "format", "(", "macro", "[", "'variance'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_variance", "(", "macro", "[", "'variance'", "]", "[", "'definition'", "]", ")", "elif", "macro", ".", "get", "(", "'state'", ",", "None", ")", ":", "result", "+=", "'~s \"{}\"\\n'", ".", "format", "(", "macro", "[", "'state'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_stateinfo", "(", "macro", "[", "'state'", "]", "[", "'definition'", "]", ")", "elif", "macro", ".", "get", "(", "'mean'", ",", "None", ")", ":", "result", "+=", "'~u \"{}\"\\n'", ".", "format", "(", "macro", "[", "'mean'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_mean", "(", "macro", "[", "'mean'", "]", "[", "'definition'", "]", ")", "elif", "macro", ".", "get", "(", "'duration'", ",", "None", ")", ":", "result", "+=", "'~d \"{}\"\\n'", ".", "format", "(", "macro", "[", "'duration'", "]", "[", "'name'", "]", ")", "result", "+=", "_serialize_duration", "(", "macro", "[", "'duration'", "]", "[", "'definition'", "]", ")", "else", ":", "raise", "NotImplementedError", "(", "'Cannot serialize {}'", ".", "format", "(", "macro", ")", ")", "for", "hmm", "in", "model", "[", "'hmms'", "]", ":", "if", "hmm", ".", "get", "(", "'name'", ",", "None", ")", "is", "not", "None", ":", "result", "+=", "'~h \"{}\"\\n'", ".", "format", "(", "hmm", "[", "'name'", "]", ")", "result", "+=", "_serialize_hmm", "(", "hmm", "[", "'definition'", "]", ")", "return", "result" ]
Serialize the HTK model into a file. :param model: Model to be serialized
[ "Serialize", "the", "HTK", "model", "into", "a", "file", "." ]
e281c0266a9a9633f34ab70f9c3ad58036c19b59
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/htk_model.py#L316-L360
243,910
sassoo/goldman
goldman/api.py
API._load_resources
def _load_resources(self):
    """ Load all the native goldman resources.

    The route or API endpoint will be automatically
    determined based on the resource object instance
    passed in.

    INFO: Only our Model based resources are supported
    when auto-generating API endpoints.
    """
    for resource in self.RESOURCES:
        if isinstance(resource, goldman.ModelsResource):
            route = '/%s' % resource.rtype
        elif isinstance(resource, goldman.ModelResource):
            route = '/%s/{rid}' % resource.rtype
        elif isinstance(resource, goldman.RelatedResource):
            route = '/%s/{rid}/{related}' % resource.rtype
        else:
            raise TypeError('unsupported resource type')

        self.add_route(*(route, resource))
python
def _load_resources(self):
    """ Load all the native goldman resources.

    The route or API endpoint will be automatically
    determined based on the resource object instance
    passed in.

    INFO: Only our Model based resources are supported
    when auto-generating API endpoints.
    """
    for resource in self.RESOURCES:
        if isinstance(resource, goldman.ModelsResource):
            route = '/%s' % resource.rtype
        elif isinstance(resource, goldman.ModelResource):
            route = '/%s/{rid}' % resource.rtype
        elif isinstance(resource, goldman.RelatedResource):
            route = '/%s/{rid}/{related}' % resource.rtype
        else:
            raise TypeError('unsupported resource type')

        self.add_route(*(route, resource))
[ "def", "_load_resources", "(", "self", ")", ":", "for", "resource", "in", "self", ".", "RESOURCES", ":", "if", "isinstance", "(", "resource", ",", "goldman", ".", "ModelsResource", ")", ":", "route", "=", "'/%s'", "%", "resource", ".", "rtype", "elif", "isinstance", "(", "resource", ",", "goldman", ".", "ModelResource", ")", ":", "route", "=", "'/%s/{rid}'", "%", "resource", ".", "rtype", "elif", "isinstance", "(", "resource", ",", "goldman", ".", "RelatedResource", ")", ":", "route", "=", "'/%s/{rid}/{related}'", "%", "resource", ".", "rtype", "else", ":", "raise", "TypeError", "(", "'unsupported resource type'", ")", "self", ".", "add_route", "(", "*", "(", "route", ",", "resource", ")", ")" ]
Load all the native goldman resources. The route or API endpoint will be automatically determined based on the resource object instance passed in. INFO: Only our Model based resources are supported when auto-generating API endpoints.
[ "Load", "all", "the", "native", "goldman", "resources", "." ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/api.py#L55-L75
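For a resource whose rtype were, say, 'users' (a made-up value, purely for illustration), the three branches in _load_resources above would register these routes:

    # Illustration only: the route strings the branches above would produce
    # for a hypothetical rtype of 'users'.
    rtype = 'users'
    print('/%s' % rtype)                  # ModelsResource  -> /users
    print('/%s/{rid}' % rtype)            # ModelResource   -> /users/{rid}
    print('/%s/{rid}/{related}' % rtype)  # RelatedResource -> /users/{rid}/{related}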
243,911
sassoo/goldman
goldman/api.py
API._error_serializer
def _error_serializer(req, exc):  # pylint: disable=unused-argument
    """ Serializer for native falcon HTTPError exceptions.

    We override the default serializer with our own so we
    can ensure the errors are serialized in a JSON API
    compliant format.

    Surprisingly, most falcon error attributes map directly
    to the JSON API spec. The few that don't can be mapped
    accordingly:

        HTTPError            JSON API
        ~~~~~~~~~            ~~~~~~~~
        exc.description   -> error['detail']
        exc.link['href']  -> error['links']['about']

    Per the falcon docs this function should return a tuple
    of (MIMETYPE, BODY PAYLOAD)
    """
    error = {
        'detail': exc.description,
        'title': exc.title,
        'status': exc.status,
    }

    try:
        error['links'] = {'about': exc.link['href']}
    except (TypeError, KeyError):
        error['links'] = {'about': ''}

    return (
        goldman.config.JSONAPI_MIMETYPE,
        json.dumps({'errors': [error]}),
    )
python
def _error_serializer(req, exc):  # pylint: disable=unused-argument
    """ Serializer for native falcon HTTPError exceptions.

    We override the default serializer with our own so we
    can ensure the errors are serialized in a JSON API
    compliant format.

    Surprisingly, most falcon error attributes map directly
    to the JSON API spec. The few that don't can be mapped
    accordingly:

        HTTPError            JSON API
        ~~~~~~~~~            ~~~~~~~~
        exc.description   -> error['detail']
        exc.link['href']  -> error['links']['about']

    Per the falcon docs this function should return a tuple
    of (MIMETYPE, BODY PAYLOAD)
    """
    error = {
        'detail': exc.description,
        'title': exc.title,
        'status': exc.status,
    }

    try:
        error['links'] = {'about': exc.link['href']}
    except (TypeError, KeyError):
        error['links'] = {'about': ''}

    return (
        goldman.config.JSONAPI_MIMETYPE,
        json.dumps({'errors': [error]}),
    )
[ "def", "_error_serializer", "(", "req", ",", "exc", ")", ":", "# pylint: disable=unused-argument", "error", "=", "{", "'detail'", ":", "exc", ".", "description", ",", "'title'", ":", "exc", ".", "title", ",", "'status'", ":", "exc", ".", "status", ",", "}", "try", ":", "error", "[", "'links'", "]", "=", "{", "'about'", ":", "exc", ".", "link", "[", "'href'", "]", "}", "except", "(", "TypeError", ",", "KeyError", ")", ":", "error", "[", "'links'", "]", "=", "{", "'about'", ":", "''", "}", "return", "(", "goldman", ".", "config", ".", "JSONAPI_MIMETYPE", ",", "json", ".", "dumps", "(", "{", "'errors'", ":", "[", "error", "]", "}", ")", ",", ")" ]
Serializer for native falcon HTTPError exceptions. We override the default serializer with our own so we can ensure the errors are serialized in a JSON API compliant format. Surprisingly, most falcon error attributes map directly to the JSON API spec. The few that don't can be mapped accordingly: HTTPError JSON API ~~~~~~~~~ ~~~~~~~~ exc.description -> error['detail'] exc.link['href'] -> error['links']['about'] Per the falcon docs this function should return a tuple of (MIMETYPE, BODY PAYLOAD)
[ "Serializer", "for", "native", "falcon", "HTTPError", "exceptions", "." ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/api.py#L108-L145
243,912
lambdalisue/maidenhair
src/maidenhair/utils/peakset.py
find_peakset
def find_peakset(dataset, basecolumn=-1, method='', where=None):
    """
    Find peakset from the dataset

    Parameters
    -----------
    dataset : list
        A list of data
    basecolumn : int
        An index of column for finding peaks
    method : str
        A method name of numpy for finding peaks
    where : function
        A function which recieve ``data`` and return numpy indexing list

    Returns
    -------
    list
        A list of peaks of each axis (list)
    """
    peakset = []
    where_i = None
    for data in dataset:
        base = data[basecolumn]
        base = maidenhair.statistics.average(base)
        # limit data points
        if where:
            adata = [maidenhair.statistics.average(x) for x in data]
            where_i = np.where(where(adata))
            base = base[where_i]
        # find peak index
        index = getattr(np, method, np.argmax)(base)
        # create peakset
        for a, axis in enumerate(data):
            if len(peakset) <= a:
                peakset.append([])
            if where_i:
                axis = axis[where_i]
            peakset[a].append(axis[index])
    peakset = np.array(peakset)
    return peakset
python
def find_peakset(dataset, basecolumn=-1, method='', where=None):
    """
    Find peakset from the dataset

    Parameters
    -----------
    dataset : list
        A list of data
    basecolumn : int
        An index of column for finding peaks
    method : str
        A method name of numpy for finding peaks
    where : function
        A function which recieve ``data`` and return numpy indexing list

    Returns
    -------
    list
        A list of peaks of each axis (list)
    """
    peakset = []
    where_i = None
    for data in dataset:
        base = data[basecolumn]
        base = maidenhair.statistics.average(base)
        # limit data points
        if where:
            adata = [maidenhair.statistics.average(x) for x in data]
            where_i = np.where(where(adata))
            base = base[where_i]
        # find peak index
        index = getattr(np, method, np.argmax)(base)
        # create peakset
        for a, axis in enumerate(data):
            if len(peakset) <= a:
                peakset.append([])
            if where_i:
                axis = axis[where_i]
            peakset[a].append(axis[index])
    peakset = np.array(peakset)
    return peakset
[ "def", "find_peakset", "(", "dataset", ",", "basecolumn", "=", "-", "1", ",", "method", "=", "''", ",", "where", "=", "None", ")", ":", "peakset", "=", "[", "]", "where_i", "=", "None", "for", "data", "in", "dataset", ":", "base", "=", "data", "[", "basecolumn", "]", "base", "=", "maidenhair", ".", "statistics", ".", "average", "(", "base", ")", "# limit data points", "if", "where", ":", "adata", "=", "[", "maidenhair", ".", "statistics", ".", "average", "(", "x", ")", "for", "x", "in", "data", "]", "where_i", "=", "np", ".", "where", "(", "where", "(", "adata", ")", ")", "base", "=", "base", "[", "where_i", "]", "# find peak index", "index", "=", "getattr", "(", "np", ",", "method", ",", "np", ".", "argmax", ")", "(", "base", ")", "# create peakset", "for", "a", ",", "axis", "in", "enumerate", "(", "data", ")", ":", "if", "len", "(", "peakset", ")", "<=", "a", ":", "peakset", ".", "append", "(", "[", "]", ")", "if", "where_i", ":", "axis", "=", "axis", "[", "where_i", "]", "peakset", "[", "a", "]", ".", "append", "(", "axis", "[", "index", "]", ")", "peakset", "=", "np", ".", "array", "(", "peakset", ")", "return", "peakset" ]
Find peakset from the dataset Parameters ----------- dataset : list A list of data basecolumn : int An index of column for finding peaks method : str A method name of numpy for finding peaks where : function A function which recieve ``data`` and return numpy indexing list Returns ------- list A list of peaks of each axis (list)
[ "Find", "peakset", "from", "the", "dataset" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/utils/peakset.py#L9-L49
243,913
praekeltfoundation/seed-control-interface-service
seed_control_interface_service/utils.py
normalise_string
def normalise_string(string):
    """ Strips trailing whitespace from string, lowercases it
    and replaces spaces with underscores
    """
    string = (string.strip()).lower()
    return re.sub(r'\W+', '_', string)
python
def normalise_string(string):
    """ Strips trailing whitespace from string, lowercases it
    and replaces spaces with underscores
    """
    string = (string.strip()).lower()
    return re.sub(r'\W+', '_', string)
[ "def", "normalise_string", "(", "string", ")", ":", "string", "=", "(", "string", ".", "strip", "(", ")", ")", ".", "lower", "(", ")", "return", "re", ".", "sub", "(", "r'\\W+'", ",", "'_'", ",", "string", ")" ]
Strips trailing whitespace from string, lowercases it and replaces spaces with underscores
[ "Strips", "trailing", "whitespace", "from", "string", "lowercases", "it", "and", "replaces", "spaces", "with", "underscores" ]
0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e
https://github.com/praekeltfoundation/seed-control-interface-service/blob/0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e/seed_control_interface_service/utils.py#L6-L11
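A quick usage sketch of normalise_string as defined above; the sample inputs are invented for illustration:

    import re

    def normalise_string(string):
        # Strip and lowercase, then collapse runs of non-word characters.
        string = (string.strip()).lower()
        return re.sub(r'\W+', '_', string)

    print(normalise_string("  Hello World "))  # -> hello_world
    print(normalise_string("A-B c"))           # -> a_b_c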
243,914
praekeltfoundation/seed-control-interface-service
seed_control_interface_service/utils.py
json_decode
def json_decode(data):
    """
    Decodes the given JSON as primitives
    """
    if isinstance(data, six.binary_type):
        data = data.decode('utf-8')

    return json.loads(data)
python
def json_decode(data):
    """
    Decodes the given JSON as primitives
    """
    if isinstance(data, six.binary_type):
        data = data.decode('utf-8')

    return json.loads(data)
[ "def", "json_decode", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "six", ".", "binary_type", ")", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", "return", "json", ".", "loads", "(", "data", ")" ]
Decodes the given JSON as primitives
[ "Decodes", "the", "given", "JSON", "as", "primitives" ]
0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e
https://github.com/praekeltfoundation/seed-control-interface-service/blob/0c8ec58ae61e72d4443e6c9a4d8b7dd12dd8a86e/seed_control_interface_service/utils.py#L14-L21
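A small usage sketch of json_decode as defined above: bytes are decoded to UTF-8 first, so str and bytes payloads yield the same primitives (the sample payloads are invented):

    import json
    import six

    def json_decode(data):
        # Decode bytes before parsing so both input types behave the same.
        if isinstance(data, six.binary_type):
            data = data.decode('utf-8')
        return json.loads(data)

    assert json_decode('{"a": 1}') == {"a": 1}
    assert json_decode(b'{"a": 1}') == {"a": 1}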
243,915
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/logging.py
BasicLogger.write
def write(self, *args):
    '''Write convenience function; writes strings.
    '''
    for s in args:
        self.out.write(s)
    event = ''.join(*args)
python
def write(self, *args):
    '''Write convenience function; writes strings.
    '''
    for s in args:
        self.out.write(s)
    event = ''.join(*args)
[ "def", "write", "(", "self", ",", "*", "args", ")", ":", "for", "s", "in", "args", ":", "self", ".", "out", ".", "write", "(", "s", ")", "event", "=", "''", ".", "join", "(", "*", "args", ")" ]
Write convenience function; writes strings.
[ "Write", "convenience", "function", ";", "writes", "strings", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/logging.py#L65-L69
243,916
pip-services3-python/pip-services3-components-python
pip_services3_components/count/LogCounters.py
LogCounters._save
def _save(self, counters):
    """
    Saves the current counters measurements.

    :param counters: current counters measurements to be saves.
    """
    if self._logger == None:
        return
    if len(counters) == 0:
        return

    # Sort counters by name
    counters = sorted(counters, key=LogCounters._get_counter_name)

    for counter in counters:
        self._logger.info("counters", self._counter_to_string(counter))
python
def _save(self, counters):
    """
    Saves the current counters measurements.

    :param counters: current counters measurements to be saves.
    """
    if self._logger == None:
        return
    if len(counters) == 0:
        return

    # Sort counters by name
    counters = sorted(counters, key=LogCounters._get_counter_name)

    for counter in counters:
        self._logger.info("counters", self._counter_to_string(counter))
[ "def", "_save", "(", "self", ",", "counters", ")", ":", "if", "self", ".", "_logger", "==", "None", ":", "return", "if", "len", "(", "counters", ")", "==", "0", ":", "return", "# Sort counters by name", "counters", "=", "sorted", "(", "counters", ",", "key", "=", "LogCounters", ".", "_get_counter_name", ")", "for", "counter", "in", "counters", ":", "self", ".", "_logger", ".", "info", "(", "\"counters\"", ",", "self", ".", "_counter_to_string", "(", "counter", ")", ")" ]
Saves the current counters measurements. :param counters: current counters measurements to be saves.
[ "Saves", "the", "current", "counters", "measurements", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/count/LogCounters.py#L87-L102
243,917
noirbizarre/minibench
minibench/utils.py
humanize
def humanize(text):
    '''Transform code conventions to human readable strings'''
    words = []
    for part in text.split('_'):
        for word in RE_CAMEL.findall(part) or [part]:
            words.append(word.lower())
    words[0] = words[0].title()
    return ' '.join(words)
python
def humanize(text):
    '''Transform code conventions to human readable strings'''
    words = []
    for part in text.split('_'):
        for word in RE_CAMEL.findall(part) or [part]:
            words.append(word.lower())
    words[0] = words[0].title()
    return ' '.join(words)
[ "def", "humanize", "(", "text", ")", ":", "words", "=", "[", "]", "for", "part", "in", "text", ".", "split", "(", "'_'", ")", ":", "for", "word", "in", "RE_CAMEL", ".", "findall", "(", "part", ")", "or", "[", "part", "]", ":", "words", ".", "append", "(", "word", ".", "lower", "(", ")", ")", "words", "[", "0", "]", "=", "words", "[", "0", "]", ".", "title", "(", ")", "return", "' '", ".", "join", "(", "words", ")" ]
Transform code conventions to human readable strings
[ "Transform", "code", "conventions", "to", "human", "readable", "strings" ]
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/utils.py#L9-L16
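The record above does not show RE_CAMEL, so the sketch below assumes one plausible definition purely to make the example runnable; the real pattern in minibench may differ:

    import re

    # Assumed camel-case splitter; not the library's actual definition.
    RE_CAMEL = re.compile(r'[A-Z]?[a-z0-9]+|[A-Z]+')

    def humanize(text):
        words = []
        for part in text.split('_'):
            for word in RE_CAMEL.findall(part) or [part]:
                words.append(word.lower())
        words[0] = words[0].title()
        return ' '.join(words)

    print(humanize("find_peakset"))  # -> Find peakset
    print(humanize("searchIndex"))   # -> Search index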
243,918
mbodenhamer/syn
syn/types/a/ne.py
is_visit_primitive
def is_visit_primitive(obj):
    '''Returns true if properly visiting the object returns only the
    object itself.'''
    from .base import visit
    if (isinstance(obj, tuple(PRIMITIVE_TYPES)) and
        not isinstance(obj, STR) and not isinstance(obj, bytes)):
        return True
    if (isinstance(obj, CONTAINERS) and
        not isinstance(obj, STR) and not isinstance(obj, bytes)):
        return False
    if isinstance(obj, STR) or isinstance(obj, bytes):
        if len(obj) == 1:
            return True
        return False
    return list(visit(obj, max_enum=2)) == [obj]
python
def is_visit_primitive(obj):
    '''Returns true if properly visiting the object returns only the
    object itself.'''
    from .base import visit
    if (isinstance(obj, tuple(PRIMITIVE_TYPES)) and
        not isinstance(obj, STR) and not isinstance(obj, bytes)):
        return True
    if (isinstance(obj, CONTAINERS) and
        not isinstance(obj, STR) and not isinstance(obj, bytes)):
        return False
    if isinstance(obj, STR) or isinstance(obj, bytes):
        if len(obj) == 1:
            return True
        return False
    return list(visit(obj, max_enum=2)) == [obj]
[ "def", "is_visit_primitive", "(", "obj", ")", ":", "from", ".", "base", "import", "visit", "if", "(", "isinstance", "(", "obj", ",", "tuple", "(", "PRIMITIVE_TYPES", ")", ")", "and", "not", "isinstance", "(", "obj", ",", "STR", ")", "and", "not", "isinstance", "(", "obj", ",", "bytes", ")", ")", ":", "return", "True", "if", "(", "isinstance", "(", "obj", ",", "CONTAINERS", ")", "and", "not", "isinstance", "(", "obj", ",", "STR", ")", "and", "not", "isinstance", "(", "obj", ",", "bytes", ")", ")", ":", "return", "False", "if", "isinstance", "(", "obj", ",", "STR", ")", "or", "isinstance", "(", "obj", ",", "bytes", ")", ":", "if", "len", "(", "obj", ")", "==", "1", ":", "return", "True", "return", "False", "return", "list", "(", "visit", "(", "obj", ",", "max_enum", "=", "2", ")", ")", "==", "[", "obj", "]" ]
Returns true if properly visiting the object returns only the object itself.
[ "Returns", "true", "if", "properly", "visiting", "the", "object", "returns", "only", "the", "object", "itself", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/types/a/ne.py#L516-L529
243,919
pip-services3-python/pip-services3-components-python
pip_services3_components/cache/MemoryCache.py
MemoryCache.clear
def clear(self, correlation_id):
    """
    Clears component state.

    :param correlation_id: (optional) transaction id to trace execution through call chain.
    """
    self._lock.acquire()
    try:
        self._cache = {}
    finally:
        self._lock.release()
python
def clear(self, correlation_id):
    """
    Clears component state.

    :param correlation_id: (optional) transaction id to trace execution through call chain.
    """
    self._lock.acquire()
    try:
        self._cache = {}
    finally:
        self._lock.release()
[ "def", "clear", "(", "self", ",", "correlation_id", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "try", ":", "self", ".", "_cache", "=", "{", "}", "finally", ":", "self", ".", "_lock", ".", "release", "(", ")" ]
Clears component state. :param correlation_id: (optional) transaction id to trace execution through call chain.
[ "Clears", "component", "state", "." ]
1de9c1bb544cf1891111e9a5f5d67653f62c9b52
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/cache/MemoryCache.py#L181-L191
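The acquire/try/finally pattern in clear above is the manual form of lock-guarded code; a with-statement on the lock is an equivalent, shorter spelling. A sketch under that assumption (not the library's actual code):

    import threading

    class MemoryCacheSketch(object):
        # Illustration only: an equivalent lock-guarded clear() using a context manager.
        def __init__(self):
            self._lock = threading.Lock()
            self._cache = {}

        def clear(self, correlation_id=None):
            with self._lock:  # acquired on entry, released on exit, even on error
                self._cache = {}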
243,920
ronaldguillen/wave
wave/decorators.py
rest_verbs
def rest_verbs(http_method_names=None):
    """
    Decorator that converts a function-based view into an RestView subclass.
    Takes a list of allowed methods for the view as an argument.
    """
    http_method_names = ['GET'] if (http_method_names is None) else http_method_names

    def decorator(func):
        WrappedRestView = type(
            six.PY3 and 'WrappedRestView' or b'WrappedRestView',
            (RestView,),
            {'__doc__': func.__doc__}
        )

        # Note, the above allows us to set the docstring.
        # It is the equivalent of:
        #
        # class WrappedRestView(RestView):
        #     pass
        # WrappedRestView.__doc__ = func.doc  <--- Not possible to do this

        # api_view applied without (method_names)
        assert not(isinstance(http_method_names, types.FunctionType)), \
            '@api_view missing list of allowed HTTP methods'

        # api_view applied with eg. string instead of list of strings
        assert isinstance(http_method_names, (list, tuple)), \
            '@api_view expected a list of strings, received %s' % type(http_method_names).__name__

        allowed_methods = set(http_method_names) | set(('options',))
        WrappedRestView.http_method_names = [method.lower() for method in allowed_methods]

        def handler(self, *args, **kwargs):
            return func(*args, **kwargs)

        for method in http_method_names:
            setattr(WrappedRestView, method.lower(), handler)

        WrappedRestView.__name__ = func.__name__

        WrappedRestView.renderer_classes = getattr(func, 'renderer_classes',
                                                   RestView.renderer_classes)
        WrappedRestView.parser_classes = getattr(func, 'parser_classes',
                                                 RestView.parser_classes)
        WrappedRestView.authentication_classes = getattr(func, 'authentication_classes',
                                                          RestView.authentication_classes)
        WrappedRestView.throttle_classes = getattr(func, 'throttle_classes',
                                                   RestView.throttle_classes)
        WrappedRestView.permission_classes = getattr(func, 'permission_classes',
                                                     RestView.permission_classes)

        return WrappedRestView.as_view()
    return decorator
python
def rest_verbs(http_method_names=None):
    """
    Decorator that converts a function-based view into an RestView subclass.
    Takes a list of allowed methods for the view as an argument.
    """
    http_method_names = ['GET'] if (http_method_names is None) else http_method_names

    def decorator(func):
        WrappedRestView = type(
            six.PY3 and 'WrappedRestView' or b'WrappedRestView',
            (RestView,),
            {'__doc__': func.__doc__}
        )

        # Note, the above allows us to set the docstring.
        # It is the equivalent of:
        #
        # class WrappedRestView(RestView):
        #     pass
        # WrappedRestView.__doc__ = func.doc  <--- Not possible to do this

        # api_view applied without (method_names)
        assert not(isinstance(http_method_names, types.FunctionType)), \
            '@api_view missing list of allowed HTTP methods'

        # api_view applied with eg. string instead of list of strings
        assert isinstance(http_method_names, (list, tuple)), \
            '@api_view expected a list of strings, received %s' % type(http_method_names).__name__

        allowed_methods = set(http_method_names) | set(('options',))
        WrappedRestView.http_method_names = [method.lower() for method in allowed_methods]

        def handler(self, *args, **kwargs):
            return func(*args, **kwargs)

        for method in http_method_names:
            setattr(WrappedRestView, method.lower(), handler)

        WrappedRestView.__name__ = func.__name__

        WrappedRestView.renderer_classes = getattr(func, 'renderer_classes',
                                                   RestView.renderer_classes)
        WrappedRestView.parser_classes = getattr(func, 'parser_classes',
                                                 RestView.parser_classes)
        WrappedRestView.authentication_classes = getattr(func, 'authentication_classes',
                                                          RestView.authentication_classes)
        WrappedRestView.throttle_classes = getattr(func, 'throttle_classes',
                                                   RestView.throttle_classes)
        WrappedRestView.permission_classes = getattr(func, 'permission_classes',
                                                     RestView.permission_classes)

        return WrappedRestView.as_view()
    return decorator
[ "def", "rest_verbs", "(", "http_method_names", "=", "None", ")", ":", "http_method_names", "=", "[", "'GET'", "]", "if", "(", "http_method_names", "is", "None", ")", "else", "http_method_names", "def", "decorator", "(", "func", ")", ":", "WrappedRestView", "=", "type", "(", "six", ".", "PY3", "and", "'WrappedRestView'", "or", "b'WrappedRestView'", ",", "(", "RestView", ",", ")", ",", "{", "'__doc__'", ":", "func", ".", "__doc__", "}", ")", "# Note, the above allows us to set the docstring.", "# It is the equivalent of:", "#", "# class WrappedRestView(RestView):", "# pass", "# WrappedRestView.__doc__ = func.doc <--- Not possible to do this", "# api_view applied without (method_names)", "assert", "not", "(", "isinstance", "(", "http_method_names", ",", "types", ".", "FunctionType", ")", ")", ",", "'@api_view missing list of allowed HTTP methods'", "# api_view applied with eg. string instead of list of strings", "assert", "isinstance", "(", "http_method_names", ",", "(", "list", ",", "tuple", ")", ")", ",", "'@api_view expected a list of strings, received %s'", "%", "type", "(", "http_method_names", ")", ".", "__name__", "allowed_methods", "=", "set", "(", "http_method_names", ")", "|", "set", "(", "(", "'options'", ",", ")", ")", "WrappedRestView", ".", "http_method_names", "=", "[", "method", ".", "lower", "(", ")", "for", "method", "in", "allowed_methods", "]", "def", "handler", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "method", "in", "http_method_names", ":", "setattr", "(", "WrappedRestView", ",", "method", ".", "lower", "(", ")", ",", "handler", ")", "WrappedRestView", ".", "__name__", "=", "func", ".", "__name__", "WrappedRestView", ".", "renderer_classes", "=", "getattr", "(", "func", ",", "'renderer_classes'", ",", "RestView", ".", "renderer_classes", ")", "WrappedRestView", ".", "parser_classes", "=", "getattr", "(", "func", ",", "'parser_classes'", ",", "RestView", ".", "parser_classes", ")", "WrappedRestView", ".", "authentication_classes", "=", "getattr", "(", "func", ",", "'authentication_classes'", ",", "RestView", ".", "authentication_classes", ")", "WrappedRestView", ".", "throttle_classes", "=", "getattr", "(", "func", ",", "'throttle_classes'", ",", "RestView", ".", "throttle_classes", ")", "WrappedRestView", ".", "permission_classes", "=", "getattr", "(", "func", ",", "'permission_classes'", ",", "RestView", ".", "permission_classes", ")", "return", "WrappedRestView", ".", "as_view", "(", ")", "return", "decorator" ]
Decorator that converts a function-based view into an RestView subclass. Takes a list of allowed methods for the view as an argument.
[ "Decorator", "that", "converts", "a", "function", "-", "based", "view", "into", "an", "RestView", "subclass", ".", "Takes", "a", "list", "of", "allowed", "methods", "for", "the", "view", "as", "an", "argument", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/decorators.py#L18-L76
243,921
Bouni/swisshydrodata
swisshydrodata/__init__.py
SwissHydroData.get_stations
def get_stations(self):
    """ Return a list of all stations IDs """
    request = requests.get("{}/stations".format(
        self.base_url))
    if request.status_code != 200:
        return None
    return request.json()
python
def get_stations(self):
    """ Return a list of all stations IDs """
    request = requests.get("{}/stations".format(
        self.base_url))
    if request.status_code != 200:
        return None
    return request.json()
[ "def", "get_stations", "(", "self", ")", ":", "request", "=", "requests", ".", "get", "(", "\"{}/stations\"", ".", "format", "(", "self", ".", "base_url", ")", ")", "if", "request", ".", "status_code", "!=", "200", ":", "return", "None", "return", "request", ".", "json", "(", ")" ]
Return a list of all stations IDs
[ "Return", "a", "list", "of", "all", "stations", "IDs" ]
6434e5927560e53b6b6b5d9c1c85de5cc351da3a
https://github.com/Bouni/swisshydrodata/blob/6434e5927560e53b6b6b5d9c1c85de5cc351da3a/swisshydrodata/__init__.py#L21-L27
243,922
Bouni/swisshydrodata
swisshydrodata/__init__.py
SwissHydroData.get_station_temperature_datetime
def get_station_temperature_datetime(self, station_id):
    """ Return temperature measurement datetime for a given station """
    request = requests.get(
        "{}/station/{}/parameters/temperature/datetime".format(
            self.base_url, station_id))
    if request.status_code != 200:
        return None
    return datetime.strptime(request.json(), "%Y-%m-%dT%H:%M:%S")
python
def get_station_temperature_datetime(self, station_id):
    """ Return temperature measurement datetime for a given station """
    request = requests.get(
        "{}/station/{}/parameters/temperature/datetime".format(
            self.base_url, station_id))
    if request.status_code != 200:
        return None
    return datetime.strptime(request.json(), "%Y-%m-%dT%H:%M:%S")
[ "def", "get_station_temperature_datetime", "(", "self", ",", "station_id", ")", ":", "request", "=", "requests", ".", "get", "(", "\"{}/station/{}/parameters/temperature/datetime\"", ".", "format", "(", "self", ".", "base_url", ",", "station_id", ")", ")", "if", "request", ".", "status_code", "!=", "200", ":", "return", "None", "return", "datetime", ".", "strptime", "(", "request", ".", "json", "(", ")", ",", "\"%Y-%m-%dT%H:%M:%S\"", ")" ]
Return temperature measurement datetime for a given station
[ "Return", "temperature", "measurement", "datetime", "for", "a", "given", "station" ]
6434e5927560e53b6b6b5d9c1c85de5cc351da3a
https://github.com/Bouni/swisshydrodata/blob/6434e5927560e53b6b6b5d9c1c85de5cc351da3a/swisshydrodata/__init__.py#L94-L101
243,923
Bouni/swisshydrodata
swisshydrodata/__init__.py
SwissHydroData.get_station_temperature_value
def get_station_temperature_value(self, station_id):
    """ Return temperature value for a given station """
    request = requests.get(
        "{}/station/{}/parameters/temperature/value".format(
            self.base_url, station_id))
    if request.status_code != 200:
        return None
    return request.json()
python
def get_station_temperature_value(self, station_id):
    """ Return temperature value for a given station """
    request = requests.get(
        "{}/station/{}/parameters/temperature/value".format(
            self.base_url, station_id))
    if request.status_code != 200:
        return None
    return request.json()
[ "def", "get_station_temperature_value", "(", "self", ",", "station_id", ")", ":", "request", "=", "requests", ".", "get", "(", "\"{}/station/{}/parameters/temperature/value\"", ".", "format", "(", "self", ".", "base_url", ",", "station_id", ")", ")", "if", "request", ".", "status_code", "!=", "200", ":", "return", "None", "return", "request", ".", "json", "(", ")" ]
Return temperature value for a given station
[ "Return", "temperature", "value", "for", "a", "given", "station" ]
6434e5927560e53b6b6b5d9c1c85de5cc351da3a
https://github.com/Bouni/swisshydrodata/blob/6434e5927560e53b6b6b5d9c1c85de5cc351da3a/swisshydrodata/__init__.py#L103-L110
243,924
lumpywizard/check_email_status
check_email_status/__init__.py
check_email_status
def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):
    """
    Checks if an email might be valid by getting the status from the SMTP server.

    :param mx_resolver: MXResolver
    :param recipient_address: string
    :param sender_address: string
    :param smtp_timeout: integer
    :param helo_hostname: string
    :return: dict
    """
    domain = recipient_address[recipient_address.find('@') + 1:]
    if helo_hostname is None:
        helo_hostname = domain
    ret = {'status': 101, 'extended_status': None, 'message': "The server is unable to connect."}
    records = []

    try:
        records = mx_resolver.get_mx_records(helo_hostname)
    except socket.gaierror:
        ret['status'] = 512
        ret['extended_status'] = "5.1.2 Domain name address resolution failed in MX lookup."

    smtp = smtplib.SMTP(timeout=smtp_timeout)
    for mx in records:
        try:
            connection_status, connection_message = smtp.connect(mx.exchange)
            if connection_status == 220:
                smtp.helo(domain)
                smtp.mail(sender_address)
                status, message = smtp.rcpt(recipient_address)

                ret['status'] = status
                pattern = re.compile('(\d+\.\d+\.\d+)')
                matches = re.match(pattern, message)
                if matches:
                    ret['extended_status'] = matches.group(1)
                ret['message'] = message
                smtp.quit()
                break
        except smtplib.SMTPConnectError:
            ret['status'] = 111
            ret['message'] = "Connection refused or unable to open an SMTP stream."
        except smtplib.SMTPServerDisconnected:
            ret['status'] = 111
            ret['extended_status'] = "SMTP Server disconnected"
        except socket.gaierror:
            ret['status'] = 512
            ret['extended_status'] = "5.1.2 Domain name address resolution failed."

    return ret
python
def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):
    """
    Checks if an email might be valid by getting the status from the SMTP server.

    :param mx_resolver: MXResolver
    :param recipient_address: string
    :param sender_address: string
    :param smtp_timeout: integer
    :param helo_hostname: string
    :return: dict
    """
    domain = recipient_address[recipient_address.find('@') + 1:]
    if helo_hostname is None:
        helo_hostname = domain
    ret = {'status': 101, 'extended_status': None, 'message': "The server is unable to connect."}
    records = []

    try:
        records = mx_resolver.get_mx_records(helo_hostname)
    except socket.gaierror:
        ret['status'] = 512
        ret['extended_status'] = "5.1.2 Domain name address resolution failed in MX lookup."

    smtp = smtplib.SMTP(timeout=smtp_timeout)
    for mx in records:
        try:
            connection_status, connection_message = smtp.connect(mx.exchange)
            if connection_status == 220:
                smtp.helo(domain)
                smtp.mail(sender_address)
                status, message = smtp.rcpt(recipient_address)

                ret['status'] = status
                pattern = re.compile('(\d+\.\d+\.\d+)')
                matches = re.match(pattern, message)
                if matches:
                    ret['extended_status'] = matches.group(1)
                ret['message'] = message
                smtp.quit()
                break
        except smtplib.SMTPConnectError:
            ret['status'] = 111
            ret['message'] = "Connection refused or unable to open an SMTP stream."
        except smtplib.SMTPServerDisconnected:
            ret['status'] = 111
            ret['extended_status'] = "SMTP Server disconnected"
        except socket.gaierror:
            ret['status'] = 512
            ret['extended_status'] = "5.1.2 Domain name address resolution failed."

    return ret
[ "def", "check_email_status", "(", "mx_resolver", ",", "recipient_address", ",", "sender_address", ",", "smtp_timeout", "=", "10", ",", "helo_hostname", "=", "None", ")", ":", "domain", "=", "recipient_address", "[", "recipient_address", ".", "find", "(", "'@'", ")", "+", "1", ":", "]", "if", "helo_hostname", "is", "None", ":", "helo_hostname", "=", "domain", "ret", "=", "{", "'status'", ":", "101", ",", "'extended_status'", ":", "None", ",", "'message'", ":", "\"The server is unable to connect.\"", "}", "records", "=", "[", "]", "try", ":", "records", "=", "mx_resolver", ".", "get_mx_records", "(", "helo_hostname", ")", "except", "socket", ".", "gaierror", ":", "ret", "[", "'status'", "]", "=", "512", "ret", "[", "'extended_status'", "]", "=", "\"5.1.2 Domain name address resolution failed in MX lookup.\"", "smtp", "=", "smtplib", ".", "SMTP", "(", "timeout", "=", "smtp_timeout", ")", "for", "mx", "in", "records", ":", "try", ":", "connection_status", ",", "connection_message", "=", "smtp", ".", "connect", "(", "mx", ".", "exchange", ")", "if", "connection_status", "==", "220", ":", "smtp", ".", "helo", "(", "domain", ")", "smtp", ".", "mail", "(", "sender_address", ")", "status", ",", "message", "=", "smtp", ".", "rcpt", "(", "recipient_address", ")", "ret", "[", "'status'", "]", "=", "status", "pattern", "=", "re", ".", "compile", "(", "'(\\d+\\.\\d+\\.\\d+)'", ")", "matches", "=", "re", ".", "match", "(", "pattern", ",", "message", ")", "if", "matches", ":", "ret", "[", "'extended_status'", "]", "=", "matches", ".", "group", "(", "1", ")", "ret", "[", "'message'", "]", "=", "message", "smtp", ".", "quit", "(", ")", "break", "except", "smtplib", ".", "SMTPConnectError", ":", "ret", "[", "'status'", "]", "=", "111", "ret", "[", "'message'", "]", "=", "\"Connection refused or unable to open an SMTP stream.\"", "except", "smtplib", ".", "SMTPServerDisconnected", ":", "ret", "[", "'status'", "]", "=", "111", "ret", "[", "'extended_status'", "]", "=", "\"SMTP Server disconnected\"", "except", "socket", ".", "gaierror", ":", "ret", "[", "'status'", "]", "=", "512", "ret", "[", "'extended_status'", "]", "=", "\"5.1.2 Domain name address resolution failed.\"", "return", "ret" ]
Checks if an email might be valid by getting the status from the SMTP server. :param mx_resolver: MXResolver :param recipient_address: string :param sender_address: string :param smtp_timeout: integer :param helo_hostname: string :return: dict
[ "Checks", "if", "an", "email", "might", "be", "valid", "by", "getting", "the", "status", "from", "the", "SMTP", "server", "." ]
3a4c5dc42ada61325d5d9baad9e2b1b78084ee2f
https://github.com/lumpywizard/check_email_status/blob/3a4c5dc42ada61325d5d9baad9e2b1b78084ee2f/check_email_status/__init__.py#L6-L59
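A minimal usage sketch for the check_email_status record above. The import path follows the record's layout; StaticMXResolver and MXRecord are hypothetical stand-ins for the mx_resolver argument (anything exposing get_mx_records() that yields objects with an exchange attribute should do), and the addresses and hostname are placeholders.
from collections import namedtuple
from check_email_status import check_email_status  # import path assumed from the record

# Hypothetical resolver stub: a real resolver would query DNS for MX records.
MXRecord = namedtuple('MXRecord', ['exchange', 'preference'])

class StaticMXResolver:
    def get_mx_records(self, domain):
        return [MXRecord(exchange='mail.example.com', preference=10)]

result = check_email_status(
    mx_resolver=StaticMXResolver(),
    recipient_address='someone@example.com',
    sender_address='verifier@example.org',
    smtp_timeout=5,
)
# Status 250 means the server accepted the recipient; 101/111/512 indicate connection or DNS problems.
print(result['status'], result.get('extended_status'), result['message'])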
243,925
acsone/streamingxmlwriter
streamingxmlwriter/core.py
_StreamingXMLWriter.start_namespace
def start_namespace(self, prefix, uri): """ Declare a namespace prefix Use prefix=None to set the default namespace. """ self._g.startPrefixMapping(prefix, uri) self._ns[prefix] = uri
python
def start_namespace(self, prefix, uri): """ Declare a namespace prefix Use prefix=None to set the default namespace. """ self._g.startPrefixMapping(prefix, uri) self._ns[prefix] = uri
[ "def", "start_namespace", "(", "self", ",", "prefix", ",", "uri", ")", ":", "self", ".", "_g", ".", "startPrefixMapping", "(", "prefix", ",", "uri", ")", "self", ".", "_ns", "[", "prefix", "]", "=", "uri" ]
Declare a namespace prefix Use prefix=None to set the default namespace.
[ "Declare", "a", "namespace", "prefix" ]
68a7f48c4edf1fb310cf2518f7d183ab8d5dfe42
https://github.com/acsone/streamingxmlwriter/blob/68a7f48c4edf1fb310cf2518f7d183ab8d5dfe42/streamingxmlwriter/core.py#L174-L180
243,926
acsone/streamingxmlwriter
streamingxmlwriter/core.py
_StreamingXMLWriter.end_namespace
def end_namespace(self, prefix): """ Undeclare a namespace prefix. """ del self._ns[prefix] self._g.endPrefixMapping(prefix)
python
def end_namespace(self, prefix): """ Undeclare a namespace prefix. """ del self._ns[prefix] self._g.endPrefixMapping(prefix)
[ "def", "end_namespace", "(", "self", ",", "prefix", ")", ":", "del", "self", ".", "_ns", "[", "prefix", "]", "self", ".", "_g", ".", "endPrefixMapping", "(", "prefix", ")" ]
Undeclare a namespace prefix.
[ "Undeclare", "a", "namespace", "prefix", "." ]
68a7f48c4edf1fb310cf2518f7d183ab8d5dfe42
https://github.com/acsone/streamingxmlwriter/blob/68a7f48c4edf1fb310cf2518f7d183ab8d5dfe42/streamingxmlwriter/core.py#L182-L185
243,927
rosenbrockc/acorn
server/ui/views.py
_get_colors
def _get_colors(n): """Returns n unique and "evenly" spaced colors for the backgrounds of the projects. Args: n (int): The number of unique colors wanted. Returns: colors (list of str): The colors in hex form. """ import matplotlib.pyplot as plt from matplotlib.colors import rgb2hex as r2h from numpy import linspace cols = linspace(0.05, .95, n) cmap = plt.get_cmap('nipy_spectral') return [r2h(cmap(i)) for i in cols]
python
def _get_colors(n): """Returns n unique and "evenly" spaced colors for the backgrounds of the projects. Args: n (int): The number of unique colors wanted. Returns: colors (list of str): The colors in hex form. """ import matplotlib.pyplot as plt from matplotlib.colors import rgb2hex as r2h from numpy import linspace cols = linspace(0.05, .95, n) cmap = plt.get_cmap('nipy_spectral') return [r2h(cmap(i)) for i in cols]
[ "def", "_get_colors", "(", "n", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "from", "matplotlib", ".", "colors", "import", "rgb2hex", "as", "r2h", "from", "numpy", "import", "linspace", "cols", "=", "linspace", "(", "0.05", ",", ".95", ",", "n", ")", "cmap", "=", "plt", ".", "get_cmap", "(", "'nipy_spectral'", ")", "return", "[", "r2h", "(", "cmap", "(", "i", ")", ")", "for", "i", "in", "cols", "]" ]
Returns n unique and "evenly" spaced colors for the backgrounds of the projects. Args: n (int): The number of unique colors wanted. Returns: colors (list of str): The colors in hex form.
[ "Returns", "n", "unique", "and", "evenly", "spaced", "colors", "for", "the", "backgrounds", "of", "the", "projects", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/server/ui/views.py#L12-L29
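A standalone sketch of the same colour-spacing idea as _get_colors above; since the helper is private to the acorn views module, nothing is imported from it, and the function name below is made up.
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex
from numpy import linspace

def spaced_hex_colors(n, cmap_name='nipy_spectral'):
    # Sample n points along the colormap and convert each RGBA value to hex.
    cmap = plt.get_cmap(cmap_name)
    return [rgb2hex(cmap(x)) for x in linspace(0.05, 0.95, n)]

print(spaced_hex_colors(4))  # four hex strings; exact values depend on the matplotlib version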
243,928
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
build_prefixes
def build_prefixes(namespaces=None): """Internal function takes a list of prefix, namespace uri tuples and generates a SPARQL PREFIX string. Args: namespaces(list): List of tuples, defaults to BIBFRAME and Schema.org Returns: string """ if namespaces is None: namespaces = [ ('bf', str(BIBFRAME)), ('schema', str(SCHEMA_ORG)) ] output = "PREFIX {}: <{}>\n".format( namespaces[0][0], namespaces[0][1]) if len(namespaces) == 1: return output else: for namespace in namespaces[1:]: output += "PREFIX {}: <{}>\n".format(namespace[0], namespace[1]) return output
python
def build_prefixes(namespaces=None): """Internal function takes a list of prefix, namespace uri tuples and generates a SPARQL PREFIX string. Args: namespaces(list): List of tuples, defaults to BIBFRAME and Schema.org Returns: string """ if namespaces is None: namespaces = [ ('bf', str(BIBFRAME)), ('schema', str(SCHEMA_ORG)) ] output = "PREFIX {}: <{}>\n".format( namespaces[0][0], namespaces[0][1]) if len(namespaces) == 1: return output else: for namespace in namespaces[1:]: output += "PREFIX {}: <{}>\n".format(namespace[0], namespace[1]) return output
[ "def", "build_prefixes", "(", "namespaces", "=", "None", ")", ":", "if", "namespaces", "is", "None", ":", "namespaces", "=", "[", "(", "'bf'", ",", "str", "(", "BIBFRAME", ")", ")", ",", "(", "'schema'", ",", "str", "(", "SCHEMA_ORG", ")", ")", "]", "output", "=", "\"PREFIX {}: <{}>\\n\"", ".", "format", "(", "namespaces", "[", "0", "]", "[", "0", "]", ",", "namespaces", "[", "0", "]", "[", "1", "]", ")", "if", "len", "(", "namespaces", ")", "==", "1", ":", "return", "output", "else", ":", "for", "namespace", "in", "namespaces", "[", "1", ":", "]", ":", "output", "+=", "\"PREFIX {}: <{}>\\n\"", ".", "format", "(", "namespace", "[", "0", "]", ",", "namespace", "[", "1", "]", ")", "return", "output" ]
Internal function takes a list of prefix, namespace uri tuples and generates a SPARQL PREFIX string. Args: namespaces(list): List of tuples, defaults to BIBFRAME and Schema.org Returns: string
[ "Internal", "function", "takes", "a", "list", "of", "prefix", "namespace", "uri", "tuples", "and", "generates", "a", "SPARQL", "PREFIX", "string", "." ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L55-L79
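A quick check of the PREFIX string produced by build_prefixes above; the import assumes the flask_fedora_commons package from this record is installed, and the dc/foaf namespaces are only example inputs.
from flask_fedora_commons import build_prefixes  # import path assumed

sparql_prefixes = build_prefixes([
    ('dc', 'http://purl.org/dc/elements/1.1/'),
    ('foaf', 'http://xmlns.com/foaf/0.1/'),
])
print(sparql_prefixes)
# PREFIX dc: <http://purl.org/dc/elements/1.1/>
# PREFIX foaf: <http://xmlns.com/foaf/0.1/>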
243,929
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
copy_graph
def copy_graph(subject, existing_graph): """Function takes a subject and an existing graph, returns a new graph with all predicate and objects of the existing graph copied to the new_graph with subject as the new subject Args: subject(rdflib.URIRef): A URIRef subject existing_graph(rdflib.Graph): A rdflib.Graph Returns: rdflib.Graph """ new_graph = rdflib.Graph() for predicate, object_ in existing_graph.predicate_objects(): new_graph.add((subject, predicate, object_)) return new_graph
python
def copy_graph(subject, existing_graph): """Function takes a subject and an existing graph, returns a new graph with all predicate and objects of the existing graph copied to the new_graph with subject as the new subject Args: subject(rdflib.URIRef): A URIRef subject existing_graph(rdflib.Graph): A rdflib.Graph Returns: rdflib.Graph """ new_graph = rdflib.Graph() for predicate, object_ in existing_graph.predicate_objects(): new_graph.add((subject, predicate, object_)) return new_graph
[ "def", "copy_graph", "(", "subject", ",", "existing_graph", ")", ":", "new_graph", "=", "rdflib", ".", "Graph", "(", ")", "for", "predicate", ",", "object_", "in", "existing_graph", ".", "predicate_objects", "(", ")", ":", "new_graph", ".", "add", "(", "(", "subject", ",", "predicate", ",", "object_", ")", ")", "return", "new_graph" ]
Function takes a subject and an existing graph, returns a new graph with all predicate and objects of the existing graph copied to the new_graph with subject as the new subject Args: subject(rdflib.URIRef): A URIRef subject existing_graph(rdflib.Graph): A rdflib.Graph Returns: rdflib.Graph
[ "Function", "takes", "a", "subject", "and", "an", "existing", "graph", "returns", "a", "new", "graph", "with", "all", "predicate", "and", "objects", "of", "the", "existing", "graph", "copied", "to", "the", "new_graph", "with", "subject", "as", "the", "new", "subject" ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L81-L96
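A small rdflib illustration of the re-subjecting behaviour of copy_graph above; the import path is assumed from the record, and the example URIs and literal are placeholders.
import rdflib
from flask_fedora_commons import copy_graph  # import path assumed

old_subject = rdflib.URIRef('http://example.org/old')
new_subject = rdflib.URIRef('http://example.org/new')

source = rdflib.Graph()
source.add((old_subject, rdflib.RDFS.label, rdflib.Literal('A label')))

copied = copy_graph(new_subject, source)
for s, p, o in copied:
    print(s, p, o)  # every predicate/object pair now hangs off http://example.org/new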
243,930
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.init_app
def init_app(self, app): """ Initializes a Flask app object for the extension. Args: app(Flask): Flask app """ app.config.setdefault('FEDORA_BASE_URL', 'http://localhost:8080') if hasattr(app, 'teardown_appcontext'): app.teardown_appcontext(self.teardown) else: app.teardown_request(self.teardown)
python
def init_app(self, app): """ Initializes a Flask app object for the extension. Args: app(Flask): Flask app """ app.config.setdefault('FEDORA_BASE_URL', 'http://localhost:8080') if hasattr(app, 'teardown_appcontext'): app.teardown_appcontext(self.teardown) else: app.teardown_request(self.teardown)
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "app", ".", "config", ".", "setdefault", "(", "'FEDORA_BASE_URL'", ",", "'http://localhost:8080'", ")", "if", "hasattr", "(", "app", ",", "'teardown_appcontext'", ")", ":", "app", ".", "teardown_appcontext", "(", "self", ".", "teardown", ")", "else", ":", "app", ".", "teardown_request", "(", "self", ".", "teardown", ")" ]
Initializes a Flask app object for the extension. Args: app(Flask): Flask app
[ "Initializes", "a", "Flask", "app", "object", "for", "the", "extension", "." ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L202-L213
243,931
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.create_transaction
def create_transaction(self):
        """Method creates a new transaction resource and sets instance's
        transaction."""
        request = urllib.request.urlopen(
            urllib.parse.urljoin(self.base_url, 'fcr:tx'))
        self.transaction = request.read()
python
def create_transaction(self):
        """Method creates a new transaction resource and sets instance's
        transaction."""
        request = urllib.request.urlopen(
            urllib.parse.urljoin(self.base_url, 'fcr:tx'))
        self.transaction = request.read()
[ "def", "create_transaction", "(", "self", ")", ":", "request", "=", "urlllib", ".", "request", ".", "urlopen", "(", "urllib", ".", "parse", ".", "urljoin", "(", "self", ".", "base_url", ",", "'fcr:tx'", ")", ")", "self", ".", "transaction", "=", "request", ".", "read", "(", ")" ]
Method creates a new transaction resource and sets instance's transaction.
[ "Method", "creates", "a", "new", "transaction", "resource", "and", "sets", "instance", "s", "transaction", "." ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L215-L220
243,932
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.connect
def connect(self, fedora_url, data=None, method='Get'): """Method attempts to connect to REST servers of the Fedora Commons repository using optional data parameter. Args: fedora_url(string): Fedora URL data(dict): Data to through to REST endpoint method(str): REST Method, defaults to GET Returns: result(string): Response string from Fedora """ if data is None: data = {} if not fedora_url.startswith("http"): fedora_url = urllib.parse.urljoin(self.base_url, fedora_url) request = urllib.request.Request(fedora_url, method=method) request.add_header('Accept', 'text/turtle') request.add_header('Content-Type', 'text/turtle') if len(data) > 0: request.data = data try: response = urllib.request.urlopen(request) except urllib.error.URLError as err: if hasattr(err, 'reason'): print("failed to reach server at {} with {} method".format( fedora_url, request.method)) print("Reason: ", err.reason) print("Data: ", data) elif hasattr(err, 'code'): print("Server error {}".format(err.code)) raise err return response
python
def connect(self, fedora_url, data=None, method='Get'): """Method attempts to connect to REST servers of the Fedora Commons repository using optional data parameter. Args: fedora_url(string): Fedora URL data(dict): Data to through to REST endpoint method(str): REST Method, defaults to GET Returns: result(string): Response string from Fedora """ if data is None: data = {} if not fedora_url.startswith("http"): fedora_url = urllib.parse.urljoin(self.base_url, fedora_url) request = urllib.request.Request(fedora_url, method=method) request.add_header('Accept', 'text/turtle') request.add_header('Content-Type', 'text/turtle') if len(data) > 0: request.data = data try: response = urllib.request.urlopen(request) except urllib.error.URLError as err: if hasattr(err, 'reason'): print("failed to reach server at {} with {} method".format( fedora_url, request.method)) print("Reason: ", err.reason) print("Data: ", data) elif hasattr(err, 'code'): print("Server error {}".format(err.code)) raise err return response
[ "def", "connect", "(", "self", ",", "fedora_url", ",", "data", "=", "None", ",", "method", "=", "'Get'", ")", ":", "if", "data", "is", "None", ":", "data", "=", "{", "}", "if", "not", "fedora_url", ".", "startswith", "(", "\"http\"", ")", ":", "fedora_url", "=", "urllib", ".", "parse", ".", "urljoin", "(", "self", ".", "base_url", ",", "fedora_url", ")", "request", "=", "urllib", ".", "request", ".", "Request", "(", "fedora_url", ",", "method", "=", "method", ")", "request", ".", "add_header", "(", "'Accept'", ",", "'text/turtle'", ")", "request", ".", "add_header", "(", "'Content-Type'", ",", "'text/turtle'", ")", "if", "len", "(", "data", ")", ">", "0", ":", "request", ".", "data", "=", "data", "try", ":", "response", "=", "urllib", ".", "request", ".", "urlopen", "(", "request", ")", "except", "urllib", ".", "error", ".", "URLError", "as", "err", ":", "if", "hasattr", "(", "err", ",", "'reason'", ")", ":", "print", "(", "\"failed to reach server at {} with {} method\"", ".", "format", "(", "fedora_url", ",", "request", ".", "method", ")", ")", "print", "(", "\"Reason: \"", ",", "err", ".", "reason", ")", "print", "(", "\"Data: \"", ",", "data", ")", "elif", "hasattr", "(", "err", ",", "'code'", ")", ":", "print", "(", "\"Server error {}\"", ".", "format", "(", "err", ".", "code", ")", ")", "raise", "err", "return", "response" ]
Method attempts to connect to REST servers of the Fedora Commons repository using optional data parameter. Args: fedora_url(string): Fedora URL data(dict): Data to through to REST endpoint method(str): REST Method, defaults to GET Returns: result(string): Response string from Fedora
[ "Method", "attempts", "to", "connect", "to", "REST", "servers", "of", "the", "Fedora", "Commons", "repository", "using", "optional", "data", "parameter", "." ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L223-L261
243,933
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.as_json
def as_json(self, entity_url, context=None): """Method takes a entity uri and attempts to return the Fedora Object as a JSON-LD. Args: entity_url(str): Fedora Commons URL of Entity context(None): Returns JSON-LD with Context, default is None Returns: str: JSON-LD of Fedora Object """ try: urllib.request.urlopen(entity_url) except urllib.error.HTTPError: raise ValueError("Cannot open {}".format(entity_url)) entity_graph = self.read(entity_url) entity_json = json.loads( entity_graph.serialize( format='json-ld', context=context).decode()) return json.dumps(entity_json)
python
def as_json(self, entity_url, context=None): """Method takes a entity uri and attempts to return the Fedora Object as a JSON-LD. Args: entity_url(str): Fedora Commons URL of Entity context(None): Returns JSON-LD with Context, default is None Returns: str: JSON-LD of Fedora Object """ try: urllib.request.urlopen(entity_url) except urllib.error.HTTPError: raise ValueError("Cannot open {}".format(entity_url)) entity_graph = self.read(entity_url) entity_json = json.loads( entity_graph.serialize( format='json-ld', context=context).decode()) return json.dumps(entity_json)
[ "def", "as_json", "(", "self", ",", "entity_url", ",", "context", "=", "None", ")", ":", "try", ":", "urllib", ".", "request", ".", "urlopen", "(", "entity_url", ")", "except", "urllib", ".", "error", ".", "HTTPError", ":", "raise", "ValueError", "(", "\"Cannot open {}\"", ".", "format", "(", "entity_url", ")", ")", "entity_graph", "=", "self", ".", "read", "(", "entity_url", ")", "entity_json", "=", "json", ".", "loads", "(", "entity_graph", ".", "serialize", "(", "format", "=", "'json-ld'", ",", "context", "=", "context", ")", ".", "decode", "(", ")", ")", "return", "json", ".", "dumps", "(", "entity_json", ")" ]
Method takes a entity uri and attempts to return the Fedora Object as a JSON-LD. Args: entity_url(str): Fedora Commons URL of Entity context(None): Returns JSON-LD with Context, default is None Returns: str: JSON-LD of Fedora Object
[ "Method", "takes", "a", "entity", "uri", "and", "attempts", "to", "return", "the", "Fedora", "Object", "as", "a", "JSON", "-", "LD", "." ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L263-L285
243,934
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.create
def create(self, uri=None, graph=None, data=None): """Method takes an optional URI and graph, first checking if the URL is already present in Fedora, if not, creates a Fedora Object with the graph as properties. If URI is None, uses Fedora 4 default PID minter to create the object's URI. Args: uri(string): String of URI, default is None graph(rdflib.Graph): RDF Graph of subject, default is None data(object): Binary datastream that will be saved as fcr:content Returns: URI(string): New Fedora URI or None if uri already exists """ if uri is not None: existing_entity = self.__dedup__(rdflib.URIRef(uri), graph) if existing_entity is not None: return # Returns nothing else: default_request = urllib.request.Request( "/".join([self.base_url, "rest"]), method='POST') uri = urllib.request.urlopen(default_request).read().decode() if graph is not None: new_graph = copy_graph(rdflib.URIRef(uri), graph) create_response = self.connect( uri, data=new_graph.serialize(format='turtle'), method='PUT') raw_response = create_response.read() return uri
python
def create(self, uri=None, graph=None, data=None): """Method takes an optional URI and graph, first checking if the URL is already present in Fedora, if not, creates a Fedora Object with the graph as properties. If URI is None, uses Fedora 4 default PID minter to create the object's URI. Args: uri(string): String of URI, default is None graph(rdflib.Graph): RDF Graph of subject, default is None data(object): Binary datastream that will be saved as fcr:content Returns: URI(string): New Fedora URI or None if uri already exists """ if uri is not None: existing_entity = self.__dedup__(rdflib.URIRef(uri), graph) if existing_entity is not None: return # Returns nothing else: default_request = urllib.request.Request( "/".join([self.base_url, "rest"]), method='POST') uri = urllib.request.urlopen(default_request).read().decode() if graph is not None: new_graph = copy_graph(rdflib.URIRef(uri), graph) create_response = self.connect( uri, data=new_graph.serialize(format='turtle'), method='PUT') raw_response = create_response.read() return uri
[ "def", "create", "(", "self", ",", "uri", "=", "None", ",", "graph", "=", "None", ",", "data", "=", "None", ")", ":", "if", "uri", "is", "not", "None", ":", "existing_entity", "=", "self", ".", "__dedup__", "(", "rdflib", ".", "URIRef", "(", "uri", ")", ",", "graph", ")", "if", "existing_entity", "is", "not", "None", ":", "return", "# Returns nothing", "else", ":", "default_request", "=", "urllib", ".", "request", ".", "Request", "(", "\"/\"", ".", "join", "(", "[", "self", ".", "base_url", ",", "\"rest\"", "]", ")", ",", "method", "=", "'POST'", ")", "uri", "=", "urllib", ".", "request", ".", "urlopen", "(", "default_request", ")", ".", "read", "(", ")", ".", "decode", "(", ")", "if", "graph", "is", "not", "None", ":", "new_graph", "=", "copy_graph", "(", "rdflib", ".", "URIRef", "(", "uri", ")", ",", "graph", ")", "create_response", "=", "self", ".", "connect", "(", "uri", ",", "data", "=", "new_graph", ".", "serialize", "(", "format", "=", "'turtle'", ")", ",", "method", "=", "'PUT'", ")", "raw_response", "=", "create_response", ".", "read", "(", ")", "return", "uri" ]
Method takes an optional URI and graph, first checking if the URL is already present in Fedora, if not, creates a Fedora Object with the graph as properties. If URI is None, uses Fedora 4 default PID minter to create the object's URI. Args: uri(string): String of URI, default is None graph(rdflib.Graph): RDF Graph of subject, default is None data(object): Binary datastream that will be saved as fcr:content Returns: URI(string): New Fedora URI or None if uri already exists
[ "Method", "takes", "an", "optional", "URI", "and", "graph", "first", "checking", "if", "the", "URL", "is", "already", "present", "in", "Fedora", "if", "not", "creates", "a", "Fedora", "Object", "with", "the", "graph", "as", "properties", ".", "If", "URI", "is", "None", "uses", "Fedora", "4", "default", "PID", "minter", "to", "create", "the", "object", "s", "URI", "." ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L288-L318
243,935
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.delete
def delete(self, uri): """Method deletes a Fedora Object in the repository Args: uri(str): URI of Fedora Object """ try: self.connect(uri, method='DELETE') return True except urllib.error.HTTPError: return False
python
def delete(self, uri): """Method deletes a Fedora Object in the repository Args: uri(str): URI of Fedora Object """ try: self.connect(uri, method='DELETE') return True except urllib.error.HTTPError: return False
[ "def", "delete", "(", "self", ",", "uri", ")", ":", "try", ":", "self", ".", "connect", "(", "uri", ",", "method", "=", "'DELETE'", ")", "return", "True", "except", "urllib", ".", "error", ".", "HTTPError", ":", "return", "False" ]
Method deletes a Fedora Object in the repository Args: uri(str): URI of Fedora Object
[ "Method", "deletes", "a", "Fedora", "Object", "in", "the", "repository" ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L321-L331
243,936
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.exists
def exists(self, uri):
        """Method returns true if the entity exists in the Repository,
        false otherwise

        Args:
            uri(str): Entity URI

        Returns:
            bool
        """
        ##entity_uri = "/".join([self.base_url, entity_id])
        try:
            urllib.request.urlopen(uri)
            return True
        except urllib.error.HTTPError:
            return False
python
def exists(self, uri):
        """Method returns true if the entity exists in the Repository,
        false otherwise

        Args:
            uri(str): Entity URI

        Returns:
            bool
        """
        ##entity_uri = "/".join([self.base_url, entity_id])
        try:
            urllib.request.urlopen(uri)
            return True
        except urllib.error.HTTPError:
            return False
[ "def", "exists", "(", "self", ",", "uri", ")", ":", "##entity_uri = \"/\".join([self.base_url, entity_id])", "try", ":", "urllib", ".", "request", ".", "urlopen", "(", "uri", ")", "return", "True", "except", "urllib", ".", "error", ".", "HTTPError", ":", "return", "False" ]
Method returns true if the entity exists in the Repository, false otherwise

        Args:
            uri(str): Entity URI

        Returns:
            bool
[ "Method", "returns", "true", "is", "the", "entity", "exists", "in", "the", "Repository", "false", "otherwise" ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L335-L350
243,937
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.flush
def flush(self): """Method flushes repository, deleting all objects""" base_graph = rdflib.Graph().parse('{}/rest'.format(self.base_url)) has_child = rdflib.URIRef( 'http://fedora.info/definitions/v4/repository#hasChild') for obj in base_graph.objects(predicate=has_child): self.delete(str(obj))
python
def flush(self): """Method flushes repository, deleting all objects""" base_graph = rdflib.Graph().parse('{}/rest'.format(self.base_url)) has_child = rdflib.URIRef( 'http://fedora.info/definitions/v4/repository#hasChild') for obj in base_graph.objects(predicate=has_child): self.delete(str(obj))
[ "def", "flush", "(", "self", ")", ":", "base_graph", "=", "rdflib", ".", "Graph", "(", ")", ".", "parse", "(", "'{}/rest'", ".", "format", "(", "self", ".", "base_url", ")", ")", "has_child", "=", "rdflib", ".", "URIRef", "(", "'http://fedora.info/definitions/v4/repository#hasChild'", ")", "for", "obj", "in", "base_graph", ".", "objects", "(", "predicate", "=", "has_child", ")", ":", "self", ".", "delete", "(", "str", "(", "obj", ")", ")" ]
Method flushes repository, deleting all objects
[ "Method", "flushes", "repository", "deleting", "all", "objects" ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L352-L358
243,938
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.insert
def insert(self, entity_id, property_uri, value): """Method inserts a new entity's property in Fedora4 Repository Args: entity_id(string): Unique ID of Fedora object property_uri(string): URI of property value: Value of the property, can be literal or URI reference Returns: boolean: True if successful changed in Fedora, False otherwise """ if not entity_id.startswith("http"): entity_uri = urllib.parse.urljoin(self.base_url, entity_id) else: entity_uri = entity_id if entity_uri.endswith("/"): entity_uri = entity_uri[:-1] if not entity_id.endswith("fcr:metadata"): entity_uri = "/".join([entity_uri, "fcr:metadata"]) if not self.exists(entity_id): self.create(entity_id) sparql_template = Template("""$prefix INSERT DATA { <$entity> $prop_uri $value_str ; }""") sparql = sparql_template.substitute( prefix=build_prefixes(self.namespaces), entity=entity_uri, prop_uri=property_uri, value_str=self.__value_format__(value)) update_request = urllib.request.Request( entity_uri, data=sparql.encode(), method='PATCH', headers={'Content-Type': 'application/sparql-update'}) try: response = urllib.request.urlopen(update_request) except urllib.error.HTTPError: print("Error trying patch {}, sparql=\n{}".format(entity_uri, sparql)) return False if response.code < 400: return True return False
python
def insert(self, entity_id, property_uri, value): """Method inserts a new entity's property in Fedora4 Repository Args: entity_id(string): Unique ID of Fedora object property_uri(string): URI of property value: Value of the property, can be literal or URI reference Returns: boolean: True if successful changed in Fedora, False otherwise """ if not entity_id.startswith("http"): entity_uri = urllib.parse.urljoin(self.base_url, entity_id) else: entity_uri = entity_id if entity_uri.endswith("/"): entity_uri = entity_uri[:-1] if not entity_id.endswith("fcr:metadata"): entity_uri = "/".join([entity_uri, "fcr:metadata"]) if not self.exists(entity_id): self.create(entity_id) sparql_template = Template("""$prefix INSERT DATA { <$entity> $prop_uri $value_str ; }""") sparql = sparql_template.substitute( prefix=build_prefixes(self.namespaces), entity=entity_uri, prop_uri=property_uri, value_str=self.__value_format__(value)) update_request = urllib.request.Request( entity_uri, data=sparql.encode(), method='PATCH', headers={'Content-Type': 'application/sparql-update'}) try: response = urllib.request.urlopen(update_request) except urllib.error.HTTPError: print("Error trying patch {}, sparql=\n{}".format(entity_uri, sparql)) return False if response.code < 400: return True return False
[ "def", "insert", "(", "self", ",", "entity_id", ",", "property_uri", ",", "value", ")", ":", "if", "not", "entity_id", ".", "startswith", "(", "\"http\"", ")", ":", "entity_uri", "=", "urllib", ".", "parse", ".", "urljoin", "(", "self", ".", "base_url", ",", "entity_id", ")", "else", ":", "entity_uri", "=", "entity_id", "if", "entity_uri", ".", "endswith", "(", "\"/\"", ")", ":", "entity_uri", "=", "entity_uri", "[", ":", "-", "1", "]", "if", "not", "entity_id", ".", "endswith", "(", "\"fcr:metadata\"", ")", ":", "entity_uri", "=", "\"/\"", ".", "join", "(", "[", "entity_uri", ",", "\"fcr:metadata\"", "]", ")", "if", "not", "self", ".", "exists", "(", "entity_id", ")", ":", "self", ".", "create", "(", "entity_id", ")", "sparql_template", "=", "Template", "(", "\"\"\"$prefix\n INSERT DATA {\n <$entity> $prop_uri $value_str ;\n }\"\"\"", ")", "sparql", "=", "sparql_template", ".", "substitute", "(", "prefix", "=", "build_prefixes", "(", "self", ".", "namespaces", ")", ",", "entity", "=", "entity_uri", ",", "prop_uri", "=", "property_uri", ",", "value_str", "=", "self", ".", "__value_format__", "(", "value", ")", ")", "update_request", "=", "urllib", ".", "request", ".", "Request", "(", "entity_uri", ",", "data", "=", "sparql", ".", "encode", "(", ")", ",", "method", "=", "'PATCH'", ",", "headers", "=", "{", "'Content-Type'", ":", "'application/sparql-update'", "}", ")", "try", ":", "response", "=", "urllib", ".", "request", ".", "urlopen", "(", "update_request", ")", "except", "urllib", ".", "error", ".", "HTTPError", ":", "print", "(", "\"Error trying patch {}, sparql=\\n{}\"", ".", "format", "(", "entity_uri", ",", "sparql", ")", ")", "return", "False", "if", "response", ".", "code", "<", "400", ":", "return", "True", "return", "False" ]
Method inserts a new entity's property in Fedora4 Repository Args: entity_id(string): Unique ID of Fedora object property_uri(string): URI of property value: Value of the property, can be literal or URI reference Returns: boolean: True if successful changed in Fedora, False otherwise
[ "Method", "inserts", "a", "new", "entity", "s", "property", "in", "Fedora4", "Repository" ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L360-L406
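A standalone look at the SPARQL update that insert() above builds and PATCHes to Fedora, using the same string.Template pattern as the record; the prefix, entity URI and values below are made-up examples rather than output of the library.
from string import Template

sparql_template = Template("""$prefix
INSERT DATA {
    <$entity> $prop_uri $value_str ;
}""")

sparql = sparql_template.substitute(
    prefix='PREFIX schema: <http://schema.org/>',
    entity='http://localhost:8080/rest/example/fcr:metadata',
    prop_uri='schema:name',
    value_str='"Example title"',
)
print(sparql)  # the body sent with Content-Type: application/sparql-update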
243,939
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.read
def read(self, uri): """Method takes uri and creates a RDF graph from Fedora Repository Args: uri(str): URI of Fedora URI Returns: rdflib.Graph """ read_response = self.connect(uri) fedora_graph = rdflib.Graph().parse( data=read_response.read(), format='turtle') return fedora_graph
python
def read(self, uri): """Method takes uri and creates a RDF graph from Fedora Repository Args: uri(str): URI of Fedora URI Returns: rdflib.Graph """ read_response = self.connect(uri) fedora_graph = rdflib.Graph().parse( data=read_response.read(), format='turtle') return fedora_graph
[ "def", "read", "(", "self", ",", "uri", ")", ":", "read_response", "=", "self", ".", "connect", "(", "uri", ")", "fedora_graph", "=", "rdflib", ".", "Graph", "(", ")", ".", "parse", "(", "data", "=", "read_response", ".", "read", "(", ")", ",", "format", "=", "'turtle'", ")", "return", "fedora_graph" ]
Method takes uri and creates a RDF graph from Fedora Repository Args: uri(str): URI of Fedora URI Returns: rdflib.Graph
[ "Method", "takes", "uri", "and", "creates", "a", "RDF", "graph", "from", "Fedora", "Repository" ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L409-L422
243,940
jermnelson/flask-fedora-commons
flask_fedora_commons/__init__.py
Repository.search
def search(self, query_term): """DEPRECIATED Method takes a query term and searches Fedora Repository using SPARQL search endpoint and returns a RDF graph of the search results. Args: query_term(str): String to search repository Returns: rdflib.Graph() """ fedora_search_url = "/".join([self.base_url, 'rest', 'fcr:search']) fedora_search_url = "{}?{}".format( fedora_search_url, urllib.parse.urlencode({"q": query_term})) search_request = urllib.request.Request( fedora_search_url, method='GET') search_request.add_header('Accept', 'text/turtle') try: search_response = urllib.request.urlopen(search_request) except urllib.error.URLError as error: raise error fedora_results = rdflib.Graph().parse( data=search_response.read(), format='turtle') return fedora_results
python
def search(self, query_term): """DEPRECIATED Method takes a query term and searches Fedora Repository using SPARQL search endpoint and returns a RDF graph of the search results. Args: query_term(str): String to search repository Returns: rdflib.Graph() """ fedora_search_url = "/".join([self.base_url, 'rest', 'fcr:search']) fedora_search_url = "{}?{}".format( fedora_search_url, urllib.parse.urlencode({"q": query_term})) search_request = urllib.request.Request( fedora_search_url, method='GET') search_request.add_header('Accept', 'text/turtle') try: search_response = urllib.request.urlopen(search_request) except urllib.error.URLError as error: raise error fedora_results = rdflib.Graph().parse( data=search_response.read(), format='turtle') return fedora_results
[ "def", "search", "(", "self", ",", "query_term", ")", ":", "fedora_search_url", "=", "\"/\"", ".", "join", "(", "[", "self", ".", "base_url", ",", "'rest'", ",", "'fcr:search'", "]", ")", "fedora_search_url", "=", "\"{}?{}\"", ".", "format", "(", "fedora_search_url", ",", "urllib", ".", "parse", ".", "urlencode", "(", "{", "\"q\"", ":", "query_term", "}", ")", ")", "search_request", "=", "urllib", ".", "request", ".", "Request", "(", "fedora_search_url", ",", "method", "=", "'GET'", ")", "search_request", ".", "add_header", "(", "'Accept'", ",", "'text/turtle'", ")", "try", ":", "search_response", "=", "urllib", ".", "request", ".", "urlopen", "(", "search_request", ")", "except", "urllib", ".", "error", ".", "URLError", "as", "error", ":", "raise", "error", "fedora_results", "=", "rdflib", ".", "Graph", "(", ")", ".", "parse", "(", "data", "=", "search_response", ".", "read", "(", ")", ",", "format", "=", "'turtle'", ")", "return", "fedora_results" ]
DEPRECIATED Method takes a query term and searches Fedora Repository using SPARQL search endpoint and returns a RDF graph of the search results. Args: query_term(str): String to search repository Returns: rdflib.Graph()
[ "DEPRECIATED", "Method", "takes", "a", "query", "term", "and", "searches", "Fedora", "Repository", "using", "SPARQL", "search", "endpoint", "and", "returns", "a", "RDF", "graph", "of", "the", "search", "results", "." ]
81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4
https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L505-L531
243,941
etcher-be/emiz
emiz/weather/mission_weather/mission_weather.py
MissionWeather._normalize_direction
def _normalize_direction(heading: int) -> int: """ Make sure that 0 < heading < 360 Args: heading: base heading Returns: corrected heading """ while heading > 359: heading = int(heading - 359) while heading < 0: heading = int(heading + 359) return heading
python
def _normalize_direction(heading: int) -> int: """ Make sure that 0 < heading < 360 Args: heading: base heading Returns: corrected heading """ while heading > 359: heading = int(heading - 359) while heading < 0: heading = int(heading + 359) return heading
[ "def", "_normalize_direction", "(", "heading", ":", "int", ")", "->", "int", ":", "while", "heading", ">", "359", ":", "heading", "=", "int", "(", "heading", "-", "359", ")", "while", "heading", "<", "0", ":", "heading", "=", "int", "(", "heading", "+", "359", ")", "return", "heading" ]
Make sure that 0 < heading < 360 Args: heading: base heading Returns: corrected heading
[ "Make", "sure", "that", "0", "<", "heading", "<", "360" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/mission_weather/mission_weather.py#L102-L116
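A standalone check of the wrap-around rule used by _normalize_direction above (copied logic, not imported from emiz); note the record's loop steps by 359, which gives slightly different results from a plain heading % 360.
def normalize_direction(heading):
    # Same loop as the record: wrap by steps of 359 until 0 <= heading <= 359.
    while heading > 359:
        heading = int(heading - 359)
    while heading < 0:
        heading = int(heading + 359)
    return heading

print(normalize_direction(365), normalize_direction(-10))  # 6 and 349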
243,942
etcher-be/emiz
emiz/weather/mission_weather/mission_weather.py
MissionWeather._gauss
def _gauss(mean: int, sigma: int) -> int: """ Creates a variation from a base value Args: mean: base value sigma: gaussian sigma Returns: random value """ return int(random.gauss(mean, sigma))
python
def _gauss(mean: int, sigma: int) -> int: """ Creates a variation from a base value Args: mean: base value sigma: gaussian sigma Returns: random value """ return int(random.gauss(mean, sigma))
[ "def", "_gauss", "(", "mean", ":", "int", ",", "sigma", ":", "int", ")", "->", "int", ":", "return", "int", "(", "random", ".", "gauss", "(", "mean", ",", "sigma", ")", ")" ]
Creates a variation from a base value Args: mean: base value sigma: gaussian sigma Returns: random value
[ "Creates", "a", "variation", "from", "a", "base", "value" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/mission_weather/mission_weather.py#L119-L130
243,943
etcher-be/emiz
emiz/weather/mission_weather/mission_weather.py
MissionWeather._randomize_speed
def _randomize_speed(base_speed: int, sigma: int = None) -> int: """ Creates a variation in wind speed Args: base_speed: base wind speed sigma: sigma value for gaussian variation Returns: random wind speed """ if sigma is None: int_sigma = int(base_speed / 4) else: int_sigma = sigma val = MissionWeather._gauss(base_speed, int_sigma) if val < 0: return 0 return min(val, 50)
python
def _randomize_speed(base_speed: int, sigma: int = None) -> int: """ Creates a variation in wind speed Args: base_speed: base wind speed sigma: sigma value for gaussian variation Returns: random wind speed """ if sigma is None: int_sigma = int(base_speed / 4) else: int_sigma = sigma val = MissionWeather._gauss(base_speed, int_sigma) if val < 0: return 0 return min(val, 50)
[ "def", "_randomize_speed", "(", "base_speed", ":", "int", ",", "sigma", ":", "int", "=", "None", ")", "->", "int", ":", "if", "sigma", "is", "None", ":", "int_sigma", "=", "int", "(", "base_speed", "/", "4", ")", "else", ":", "int_sigma", "=", "sigma", "val", "=", "MissionWeather", ".", "_gauss", "(", "base_speed", ",", "int_sigma", ")", "if", "val", "<", "0", ":", "return", "0", "return", "min", "(", "val", ",", "50", ")" ]
Creates a variation in wind speed Args: base_speed: base wind speed sigma: sigma value for gaussian variation Returns: random wind speed
[ "Creates", "a", "variation", "in", "wind", "speed" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/mission_weather/mission_weather.py#L133-L151
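A standalone sketch of the clamped-gaussian speed used by _randomize_speed above (equivalent logic, not imported from emiz); the 0-50 cap and the quarter-of-base default sigma mirror the record.
import random

def randomize_speed(base_speed, sigma=None):
    sigma = int(base_speed / 4) if sigma is None else sigma
    val = int(random.gauss(base_speed, sigma))
    return max(0, min(val, 50))

print([randomize_speed(20) for _ in range(5)])  # values scattered around 20, clamped to 0..50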
243,944
etcher-be/emiz
emiz/weather/mission_weather/mission_weather.py
MissionWeather._randomize_direction
def _randomize_direction(base_heading, sigma) -> int: """ Creates a variation in direction Args: base_heading: base direction sigma: sigma value for gaussian variation Returns: random direction """ val = MissionWeather._gauss(base_heading, sigma) val = MissionWeather._normalize_direction(val) return val
python
def _randomize_direction(base_heading, sigma) -> int: """ Creates a variation in direction Args: base_heading: base direction sigma: sigma value for gaussian variation Returns: random direction """ val = MissionWeather._gauss(base_heading, sigma) val = MissionWeather._normalize_direction(val) return val
[ "def", "_randomize_direction", "(", "base_heading", ",", "sigma", ")", "->", "int", ":", "val", "=", "MissionWeather", ".", "_gauss", "(", "base_heading", ",", "sigma", ")", "val", "=", "MissionWeather", ".", "_normalize_direction", "(", "val", ")", "return", "val" ]
Creates a variation in direction Args: base_heading: base direction sigma: sigma value for gaussian variation Returns: random direction
[ "Creates", "a", "variation", "in", "direction" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/mission_weather/mission_weather.py#L154-L167
243,945
matthewdeanmartin/jiggle_version
jiggle_version/find_version_by_package.py
guess_version_by_running_live_package
def guess_version_by_running_live_package( pkg_key, default="?" ): # type: (str,str) -> Any """Guess the version of a pkg when pip doesn't provide it. :param str pkg_key: key of the package :param str default: default version to return if unable to find :returns: version :rtype: string """ try: m = import_module(pkg_key) except ImportError: return default else: return getattr(m, "__version__", default)
python
def guess_version_by_running_live_package( pkg_key, default="?" ): # type: (str,str) -> Any """Guess the version of a pkg when pip doesn't provide it. :param str pkg_key: key of the package :param str default: default version to return if unable to find :returns: version :rtype: string """ try: m = import_module(pkg_key) except ImportError: return default else: return getattr(m, "__version__", default)
[ "def", "guess_version_by_running_live_package", "(", "pkg_key", ",", "default", "=", "\"?\"", ")", ":", "# type: (str,str) -> Any", "try", ":", "m", "=", "import_module", "(", "pkg_key", ")", "except", "ImportError", ":", "return", "default", "else", ":", "return", "getattr", "(", "m", ",", "\"__version__\"", ",", "default", ")" ]
Guess the version of a pkg when pip doesn't provide it. :param str pkg_key: key of the package :param str default: default version to return if unable to find :returns: version :rtype: string
[ "Guess", "the", "version", "of", "a", "pkg", "when", "pip", "doesn", "t", "provide", "it", "." ]
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/find_version_by_package.py#L58-L74
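A quick demonstration of the fallback behaviour described above; the import path follows the record's module layout, 'requests' is only used as an example of a package that exposes __version__, and the missing-package name is deliberately bogus.
from jiggle_version.find_version_by_package import guess_version_by_running_live_package

# A package exposing __version__ returns it; anything unimportable falls back to the default.
print(guess_version_by_running_live_package('requests'))
print(guess_version_by_running_live_package('definitely_not_installed', default='unknown'))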
243,946
bosha/pypushalot
pushalot/apis.py
BaseAPI.get_api_params
def get_api_params(self): """Dictionary with available API parameters :raises ValueError: If value of __class__.params is not dictionary :return: Should return dict with available pushalot API methods :rtype: dict """ result = self.params if type(result) != dict: raise ValueError( '{}.params should return dictionary'.format( self.__class__.__name__ ) ) return result
python
def get_api_params(self): """Dictionary with available API parameters :raises ValueError: If value of __class__.params is not dictionary :return: Should return dict with available pushalot API methods :rtype: dict """ result = self.params if type(result) != dict: raise ValueError( '{}.params should return dictionary'.format( self.__class__.__name__ ) ) return result
[ "def", "get_api_params", "(", "self", ")", ":", "result", "=", "self", ".", "params", "if", "type", "(", "result", ")", "!=", "dict", ":", "raise", "ValueError", "(", "'{}.params should return dictionary'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "return", "result" ]
Dictionary with available API parameters :raises ValueError: If value of __class__.params is not dictionary :return: Should return dict with available pushalot API methods :rtype: dict
[ "Dictionary", "with", "available", "API", "parameters" ]
c1b5c941210ae61716e1695816a40c1676733739
https://github.com/bosha/pypushalot/blob/c1b5c941210ae61716e1695816a40c1676733739/pushalot/apis.py#L24-L38
243,947
bosha/pypushalot
pushalot/apis.py
BaseAPI.get_api_required_params
def get_api_required_params(self): """ List with required params :return: Dictionary with API parameters :raises ValueError: If value of __class__.required_params is not list :rtype: list """ result = self.required_params if type(result) != list: raise ValueError( '{}.required_params should return list'.format( self.__class__.__name__ ) ) return result
python
def get_api_required_params(self): """ List with required params :return: Dictionary with API parameters :raises ValueError: If value of __class__.required_params is not list :rtype: list """ result = self.required_params if type(result) != list: raise ValueError( '{}.required_params should return list'.format( self.__class__.__name__ ) ) return result
[ "def", "get_api_required_params", "(", "self", ")", ":", "result", "=", "self", ".", "required_params", "if", "type", "(", "result", ")", "!=", "list", ":", "raise", "ValueError", "(", "'{}.required_params should return list'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "return", "result" ]
List with required params :return: Dictionary with API parameters :raises ValueError: If value of __class__.required_params is not list :rtype: list
[ "List", "with", "required", "params" ]
c1b5c941210ae61716e1695816a40c1676733739
https://github.com/bosha/pypushalot/blob/c1b5c941210ae61716e1695816a40c1676733739/pushalot/apis.py#L40-L54
243,948
bosha/pypushalot
pushalot/apis.py
BaseAPI._build_params_from_kwargs
def _build_params_from_kwargs(self, **kwargs): """Builds parameters from passed arguments Search passed parameters in available methods, prepend specified API key, and return dictionary which can be sent directly to API server. :param kwargs: :type param: dict :raises ValueError: If type of specified parameter doesn't match the expected type. Also raised if some basic validation of passed parameter fails. :raises PushalotException: If required parameter not set. :return: Dictionary with params which can be sent to API server :rtype: dict """ api_methods = self.get_api_params() required_methods = self.get_api_required_params() ret_kwargs = {} for key, val in kwargs.items(): if key not in api_methods: warnings.warn( 'Passed uknown parameter [{}]'.format(key), Warning ) continue if key not in required_methods and val is None: continue if type(val) != api_methods[key]['type']: raise ValueError( "Invalid type specified to param: {}".format(key) ) if 'max_len' in api_methods[key]: if len(val) > api_methods[key]['max_len']: raise ValueError( "Lenght of parameter [{}] more than " "allowed length".format(key) ) ret_kwargs[api_methods[key]['param']] = val for item in required_methods: if item not in ret_kwargs: raise pushalot.exc.PushalotException( "Parameter [{}] required, but not set".format(item) ) return ret_kwargs
python
def _build_params_from_kwargs(self, **kwargs): """Builds parameters from passed arguments Search passed parameters in available methods, prepend specified API key, and return dictionary which can be sent directly to API server. :param kwargs: :type param: dict :raises ValueError: If type of specified parameter doesn't match the expected type. Also raised if some basic validation of passed parameter fails. :raises PushalotException: If required parameter not set. :return: Dictionary with params which can be sent to API server :rtype: dict """ api_methods = self.get_api_params() required_methods = self.get_api_required_params() ret_kwargs = {} for key, val in kwargs.items(): if key not in api_methods: warnings.warn( 'Passed uknown parameter [{}]'.format(key), Warning ) continue if key not in required_methods and val is None: continue if type(val) != api_methods[key]['type']: raise ValueError( "Invalid type specified to param: {}".format(key) ) if 'max_len' in api_methods[key]: if len(val) > api_methods[key]['max_len']: raise ValueError( "Lenght of parameter [{}] more than " "allowed length".format(key) ) ret_kwargs[api_methods[key]['param']] = val for item in required_methods: if item not in ret_kwargs: raise pushalot.exc.PushalotException( "Parameter [{}] required, but not set".format(item) ) return ret_kwargs
[ "def", "_build_params_from_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "api_methods", "=", "self", ".", "get_api_params", "(", ")", "required_methods", "=", "self", ".", "get_api_required_params", "(", ")", "ret_kwargs", "=", "{", "}", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", "not", "in", "api_methods", ":", "warnings", ".", "warn", "(", "'Passed uknown parameter [{}]'", ".", "format", "(", "key", ")", ",", "Warning", ")", "continue", "if", "key", "not", "in", "required_methods", "and", "val", "is", "None", ":", "continue", "if", "type", "(", "val", ")", "!=", "api_methods", "[", "key", "]", "[", "'type'", "]", ":", "raise", "ValueError", "(", "\"Invalid type specified to param: {}\"", ".", "format", "(", "key", ")", ")", "if", "'max_len'", "in", "api_methods", "[", "key", "]", ":", "if", "len", "(", "val", ")", ">", "api_methods", "[", "key", "]", "[", "'max_len'", "]", ":", "raise", "ValueError", "(", "\"Lenght of parameter [{}] more than \"", "\"allowed length\"", ".", "format", "(", "key", ")", ")", "ret_kwargs", "[", "api_methods", "[", "key", "]", "[", "'param'", "]", "]", "=", "val", "for", "item", "in", "required_methods", ":", "if", "item", "not", "in", "ret_kwargs", ":", "raise", "pushalot", ".", "exc", ".", "PushalotException", "(", "\"Parameter [{}] required, but not set\"", ".", "format", "(", "item", ")", ")", "return", "ret_kwargs" ]
Builds parameters from passed arguments Search passed parameters in available methods, prepend specified API key, and return dictionary which can be sent directly to API server. :param kwargs: :type param: dict :raises ValueError: If type of specified parameter doesn't match the expected type. Also raised if some basic validation of passed parameter fails. :raises PushalotException: If required parameter not set. :return: Dictionary with params which can be sent to API server :rtype: dict
[ "Builds", "parameters", "from", "passed", "arguments" ]
c1b5c941210ae61716e1695816a40c1676733739
https://github.com/bosha/pypushalot/blob/c1b5c941210ae61716e1695816a40c1676733739/pushalot/apis.py#L97-L145
243,949
ngmiller/mipsy
mipsy/util.py
LabelCache.write
def write(self, label, index): """ Saves a new label, index mapping to the cache. Raises a RuntimeError on a conflict. """ if label in self.cache: if self.cache[label] != index: error_message = 'cache_conflict on label: {} with index: {}\ncache dump: {}'.format(label, index, self.cache) raise RuntimeError(error_message) else: self.cache[label] = index
python
def write(self, label, index): """ Saves a new label, index mapping to the cache. Raises a RuntimeError on a conflict. """ if label in self.cache: if self.cache[label] != index: error_message = 'cache_conflict on label: {} with index: {}\ncache dump: {}'.format(label, index, self.cache) raise RuntimeError(error_message) else: self.cache[label] = index
[ "def", "write", "(", "self", ",", "label", ",", "index", ")", ":", "if", "label", "in", "self", ".", "cache", ":", "if", "self", ".", "cache", "[", "label", "]", "!=", "index", ":", "error_message", "=", "'cache_conflict on label: {} with index: {}\\ncache dump: {}'", ".", "format", "(", "label", ",", "index", ",", "self", ".", "cache", ")", "raise", "RuntimeError", "(", "error_message", ")", "else", ":", "self", ".", "cache", "[", "label", "]", "=", "index" ]
Saves a new label, index mapping to the cache. Raises a RuntimeError on a conflict.
[ "Saves", "a", "new", "label", "index", "mapping", "to", "the", "cache", ".", "Raises", "a", "RuntimeError", "on", "a", "conflict", "." ]
78c058f44685765193acd386e81fada3b4187b95
https://github.com/ngmiller/mipsy/blob/78c058f44685765193acd386e81fada3b4187b95/mipsy/util.py#L62-L72
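A dictionary-only sketch of the conflict rule enforced by LabelCache.write above; nothing is imported here because the record does not show how the real class initialises its cache attribute.
cache = {}

def write(label, index):
    # Same rule as the record: re-writing an identical mapping is fine,
    # a different index for a known label raises.
    if label in cache and cache[label] != index:
        raise RuntimeError('cache_conflict on label: {} with index: {}'.format(label, index))
    cache[label] = index

write('main', 0)
write('main', 0)      # identical mapping: accepted
try:
    write('main', 4)  # conflicting index
except RuntimeError as err:
    print(err)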
243,950
joausaga/ideascaly
ideascaly/utils.py
pack_image
def pack_image(filename, max_size, form_field='image'): """Pack an image from file into multipart-formdata post body""" try: if os.path.getsize(filename) > (max_size * 1024): raise IdeaScalyError('File is too big, must be less than %skb.' % max_size) except os.error as e: raise IdeaScalyError('Unable to access file: %s' % e.strerror) # build the mulitpart-formdata body fp = open(filename, 'rb') # image must be gif, jpeg, or png file_type = mimetypes.guess_type(filename) if file_type is None: raise IdeaScalyError('Could not determine file type') file_type = file_type[0] if file_type not in ['image/gif', 'image/jpeg', 'image/png']: raise IdeaScalyError('Invalid file type for image: %s' % file_type) if isinstance(filename, six.text_type): filename = filename.encode('utf-8') BOUNDARY = b'Id34Sc4ly' body = list() body.append(b'--' + BOUNDARY) body.append('content-disposition: form-data; name="{0}";' ' filename="{1}"'.format(form_field, filename) .encode('utf-8')) body.append('content-type: {0}'.format(file_type).encode('utf-8')) body.append(b'') body.append(fp.read()) body.append(b'--' + BOUNDARY + b'--') body.append(b'') fp.close() body = b'\r\n'.join(body) body_length = str(len(body)) # build headers headers = { 'content-type': 'multipart/form-data; boundary={0}'.format(BOUNDARY), 'content-length': body_length } return headers, body
python
def pack_image(filename, max_size, form_field='image'): """Pack an image from file into multipart-formdata post body""" try: if os.path.getsize(filename) > (max_size * 1024): raise IdeaScalyError('File is too big, must be less than %skb.' % max_size) except os.error as e: raise IdeaScalyError('Unable to access file: %s' % e.strerror) # build the mulitpart-formdata body fp = open(filename, 'rb') # image must be gif, jpeg, or png file_type = mimetypes.guess_type(filename) if file_type is None: raise IdeaScalyError('Could not determine file type') file_type = file_type[0] if file_type not in ['image/gif', 'image/jpeg', 'image/png']: raise IdeaScalyError('Invalid file type for image: %s' % file_type) if isinstance(filename, six.text_type): filename = filename.encode('utf-8') BOUNDARY = b'Id34Sc4ly' body = list() body.append(b'--' + BOUNDARY) body.append('content-disposition: form-data; name="{0}";' ' filename="{1}"'.format(form_field, filename) .encode('utf-8')) body.append('content-type: {0}'.format(file_type).encode('utf-8')) body.append(b'') body.append(fp.read()) body.append(b'--' + BOUNDARY + b'--') body.append(b'') fp.close() body = b'\r\n'.join(body) body_length = str(len(body)) # build headers headers = { 'content-type': 'multipart/form-data; boundary={0}'.format(BOUNDARY), 'content-length': body_length } return headers, body
[ "def", "pack_image", "(", "filename", ",", "max_size", ",", "form_field", "=", "'image'", ")", ":", "try", ":", "if", "os", ".", "path", ".", "getsize", "(", "filename", ")", ">", "(", "max_size", "*", "1024", ")", ":", "raise", "IdeaScalyError", "(", "'File is too big, must be less than %skb.'", "%", "max_size", ")", "except", "os", ".", "error", "as", "e", ":", "raise", "IdeaScalyError", "(", "'Unable to access file: %s'", "%", "e", ".", "strerror", ")", "# build the mulitpart-formdata body", "fp", "=", "open", "(", "filename", ",", "'rb'", ")", "# image must be gif, jpeg, or png", "file_type", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "if", "file_type", "is", "None", ":", "raise", "IdeaScalyError", "(", "'Could not determine file type'", ")", "file_type", "=", "file_type", "[", "0", "]", "if", "file_type", "not", "in", "[", "'image/gif'", ",", "'image/jpeg'", ",", "'image/png'", "]", ":", "raise", "IdeaScalyError", "(", "'Invalid file type for image: %s'", "%", "file_type", ")", "if", "isinstance", "(", "filename", ",", "six", ".", "text_type", ")", ":", "filename", "=", "filename", ".", "encode", "(", "'utf-8'", ")", "BOUNDARY", "=", "b'Id34Sc4ly'", "body", "=", "list", "(", ")", "body", ".", "append", "(", "b'--'", "+", "BOUNDARY", ")", "body", ".", "append", "(", "'content-disposition: form-data; name=\"{0}\";'", "' filename=\"{1}\"'", ".", "format", "(", "form_field", ",", "filename", ")", ".", "encode", "(", "'utf-8'", ")", ")", "body", ".", "append", "(", "'content-type: {0}'", ".", "format", "(", "file_type", ")", ".", "encode", "(", "'utf-8'", ")", ")", "body", ".", "append", "(", "b''", ")", "body", ".", "append", "(", "fp", ".", "read", "(", ")", ")", "body", ".", "append", "(", "b'--'", "+", "BOUNDARY", "+", "b'--'", ")", "body", ".", "append", "(", "b''", ")", "fp", ".", "close", "(", ")", "body", "=", "b'\\r\\n'", ".", "join", "(", "body", ")", "body_length", "=", "str", "(", "len", "(", "body", ")", ")", "# build headers", "headers", "=", "{", "'content-type'", ":", "'multipart/form-data; boundary={0}'", ".", "format", "(", "BOUNDARY", ")", ",", "'content-length'", ":", "body_length", "}", "return", "headers", ",", "body" ]
Pack an image from file into multipart-formdata post body
[ "Pack", "an", "image", "from", "file", "into", "multipart", "-", "formdata", "post", "body" ]
8aabb7bb1ed058406a8d352a7844e183ab64e008
https://github.com/joausaga/ideascaly/blob/8aabb7bb1ed058406a8d352a7844e183ab64e008/ideascaly/utils.py#L63-L106
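A minimal usage sketch for pack_image above; the import path is inferred from the record's repo/path fields, and the file name and size cap are illustrative:

from ideascaly.utils import pack_image

# Build a multipart/form-data payload for a local PNG, capped at 512 KB.
headers, body = pack_image('avatar.png', max_size=512, form_field='image')
print(headers['content-type'])    # multipart/form-data; boundary=... (the BOUNDARY constant above)
print(headers['content-length'])  # total byte length of the encoded body, as a string
# Oversized or unreadable files, and types other than gif/jpeg/png, raise IdeaScalyError.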
243,951
Diaoul/pyextdirect
pyextdirect/configuration.py
create_configuration
def create_configuration(name='Base'): """Create a configuration base class It is built using :class:`ConfigurationMeta`. Subclassing such a base class will register exposed methods .. class:: Base .. attribute:: configuration Configuration dict that can be used by a Router or the API .. classmethod:: register(element, action, method) Register an element in the :attr:`configuration` :param element: the element to register :type element: tuple of (class, method name) or function :param string action: name of the exposed action that will hold the method :param string method: name of the exposed method """ @classmethod def register(cls, element, action, method): if not action in cls.configuration: cls.configuration[action] = {} if method in cls.configuration[action]: raise ValueError('Method %s already defined for action %s' % (method, action)) cls.configuration[action][method] = element return ConfigurationMeta(name, (object,), {'configuration': {}, 'register': register})
python
def create_configuration(name='Base'): """Create a configuration base class It is built using :class:`ConfigurationMeta`. Subclassing such a base class will register exposed methods .. class:: Base .. attribute:: configuration Configuration dict that can be used by a Router or the API .. classmethod:: register(element, action, method) Register an element in the :attr:`configuration` :param element: the element to register :type element: tuple of (class, method name) or function :param string action: name of the exposed action that will hold the method :param string method: name of the exposed method """ @classmethod def register(cls, element, action, method): if not action in cls.configuration: cls.configuration[action] = {} if method in cls.configuration[action]: raise ValueError('Method %s already defined for action %s' % (method, action)) cls.configuration[action][method] = element return ConfigurationMeta(name, (object,), {'configuration': {}, 'register': register})
[ "def", "create_configuration", "(", "name", "=", "'Base'", ")", ":", "@", "classmethod", "def", "register", "(", "cls", ",", "element", ",", "action", ",", "method", ")", ":", "if", "not", "action", "in", "cls", ".", "configuration", ":", "cls", ".", "configuration", "[", "action", "]", "=", "{", "}", "if", "method", "in", "cls", ".", "configuration", "[", "action", "]", ":", "raise", "ValueError", "(", "'Method %s already defined for action %s'", "%", "(", "method", ",", "action", ")", ")", "cls", ".", "configuration", "[", "action", "]", "[", "method", "]", "=", "element", "return", "ConfigurationMeta", "(", "name", ",", "(", "object", ",", ")", ",", "{", "'configuration'", ":", "{", "}", ",", "'register'", ":", "register", "}", ")" ]
Create a configuration base class It is built using :class:`ConfigurationMeta`. Subclassing such a base class will register exposed methods .. class:: Base .. attribute:: configuration Configuration dict that can be used by a Router or the API .. classmethod:: register(element, action, method) Register an element in the :attr:`configuration` :param element: the element to register :type element: tuple of (class, method name) or function :param string action: name of the exposed action that will hold the method :param string method: name of the exposed method
[ "Create", "a", "configuration", "base", "class" ]
34ddfe882d467b3769644e8131fb90fe472eff80
https://github.com/Diaoul/pyextdirect/blob/34ddfe882d467b3769644e8131fb90fe472eff80/pyextdirect/configuration.py#L53-L82
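A minimal sketch of how the register classmethod built by create_configuration behaves, assuming ConfigurationMeta (defined elsewhere in pyextdirect/configuration.py) adds nothing beyond what this record shows; the action and method names are illustrative:

from pyextdirect.configuration import create_configuration

Base = create_configuration('Base')

def ping():
    return 'pong'

Base.register(ping, action='Utils', method='ping')
# Base.configuration is now {'Utils': {'ping': <function ping>}}
Base.register(ping, action='Utils', method='ping')  # same action/method again: raises ValueError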
243,952
Diaoul/pyextdirect
pyextdirect/configuration.py
expose
def expose(f=None, base=None, action=None, method=None, kind=BASIC): """Decorator to expose a function .. note:: A module function can be decorated but ``base`` parameter has to be specified :param f: function to expose :type f: function or None :param base: base class that can register the function :param string action: name of the exposed action that will hold the method :param string method: name of the exposed method :param kind: kind of the method :type kind: :data:`BASIC` or :data:`LOAD` or :data:`SUBMIT` """ def expose_f(f): f.exposed = True f.exposed_action = action f.exposed_method = method f.exposed_kind = kind return f def register_f(f): f = expose_f(f) base.register(f, action or f.__module__, method or f.__name__) return f if f is not None: # @expose case (no parameters) return expose_f(f) if base is not None: # module-level function case return register_f return expose_f
python
def expose(f=None, base=None, action=None, method=None, kind=BASIC): """Decorator to expose a function .. note:: A module function can be decorated but ``base`` parameter has to be specified :param f: function to expose :type f: function or None :param base: base class that can register the function :param string action: name of the exposed action that will hold the method :param string method: name of the exposed method :param kind: kind of the method :type kind: :data:`BASIC` or :data:`LOAD` or :data:`SUBMIT` """ def expose_f(f): f.exposed = True f.exposed_action = action f.exposed_method = method f.exposed_kind = kind return f def register_f(f): f = expose_f(f) base.register(f, action or f.__module__, method or f.__name__) return f if f is not None: # @expose case (no parameters) return expose_f(f) if base is not None: # module-level function case return register_f return expose_f
[ "def", "expose", "(", "f", "=", "None", ",", "base", "=", "None", ",", "action", "=", "None", ",", "method", "=", "None", ",", "kind", "=", "BASIC", ")", ":", "def", "expose_f", "(", "f", ")", ":", "f", ".", "exposed", "=", "True", "f", ".", "exposed_action", "=", "action", "f", ".", "exposed_method", "=", "method", "f", ".", "exposed_kind", "=", "kind", "return", "f", "def", "register_f", "(", "f", ")", ":", "f", "=", "expose_f", "(", "f", ")", "base", ".", "register", "(", "f", ",", "action", "or", "f", ".", "__module__", ",", "method", "or", "f", ".", "__name__", ")", "return", "f", "if", "f", "is", "not", "None", ":", "# @expose case (no parameters)", "return", "expose_f", "(", "f", ")", "if", "base", "is", "not", "None", ":", "# module-level function case", "return", "register_f", "return", "expose_f" ]
Decorator to expose a function .. note:: A module function can be decorated but ``base`` parameter has to be specified :param f: function to expose :type f: function or None :param base: base class that can register the function :param string action: name of the exposed action that will hold the method :param string method: name of the exposed method :param kind: kind of the method :type kind: :data:`BASIC` or :data:`LOAD` or :data:`SUBMIT`
[ "Decorator", "to", "expose", "a", "function" ]
34ddfe882d467b3769644e8131fb90fe472eff80
https://github.com/Diaoul/pyextdirect/blob/34ddfe882d467b3769644e8131fb90fe472eff80/pyextdirect/configuration.py#L85-L116
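A sketch of the three calling conventions the expose decorator above supports (bare, parameterised, and module-level with base); BASIC is the module constant used as the default kind, and the action/method names are illustrative:

from pyextdirect.configuration import create_configuration, expose, BASIC

Base = create_configuration('Base')

@expose                                           # bare form: f passed directly, attributes only
def plain():
    return 'plain'

@expose(action='Math', method='sum', kind=BASIC)  # parameterised form: decorates via expose_f
def add(a, b):
    return a + b

@expose(base=Base, action='Utils')                # module-level form: also registered on Base
def ping():
    return 'pong'

# plain.exposed is True, add.exposed_action == 'Math',
# and Base.configuration['Utils']['ping'] is the ping function.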
243,953
Diaoul/pyextdirect
pyextdirect/configuration.py
merge_configurations
def merge_configurations(configurations): """Merge configurations together and raise error if a conflict is detected :param configurations: configurations to merge together :type configurations: list of :attr:`~pyextdirect.configuration.Base.configuration` dicts :return: merged configurations as a single one :rtype: dict """ configuration = {} for c in configurations: for k, v in c.iteritems(): if k in configuration: raise ValueError('%s already in a previous base configuration' % k) configuration[k] = v return configuration
python
def merge_configurations(configurations): """Merge configurations together and raise error if a conflict is detected :param configurations: configurations to merge together :type configurations: list of :attr:`~pyextdirect.configuration.Base.configuration` dicts :return: merged configurations as a single one :rtype: dict """ configuration = {} for c in configurations: for k, v in c.iteritems(): if k in configuration: raise ValueError('%s already in a previous base configuration' % k) configuration[k] = v return configuration
[ "def", "merge_configurations", "(", "configurations", ")", ":", "configuration", "=", "{", "}", "for", "c", "in", "configurations", ":", "for", "k", ",", "v", "in", "c", ".", "iteritems", "(", ")", ":", "if", "k", "in", "configuration", ":", "raise", "ValueError", "(", "'%s already in a previous base configuration'", "%", "k", ")", "configuration", "[", "k", "]", "=", "v", "return", "configuration" ]
Merge configurations together and raise error if a conflict is detected :param configurations: configurations to merge together :type configurations: list of :attr:`~pyextdirect.configuration.Base.configuration` dicts :return: merged configurations as a single one :rtype: dict
[ "Merge", "configurations", "together", "and", "raise", "error", "if", "a", "conflict", "is", "detected" ]
34ddfe882d467b3769644e8131fb90fe472eff80
https://github.com/Diaoul/pyextdirect/blob/34ddfe882d467b3769644e8131fb90fe472eff80/pyextdirect/configuration.py#L119-L134
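A small sketch of merge_configurations above (Python 2, since the function relies on dict.iteritems); the action names and elements are illustrative:

from pyextdirect.configuration import merge_configurations

calc_conf = {'Calculator': {'add': ('Calculator', 'add')}}
utils_conf = {'Utils': {'ping': ('Utils', 'ping')}}

merged = merge_configurations([calc_conf, utils_conf])
# merged == {'Calculator': {'add': ('Calculator', 'add')}, 'Utils': {'ping': ('Utils', 'ping')}}

# A duplicate top-level action key across the inputs raises ValueError:
# merge_configurations([calc_conf, {'Calculator': {}}])  -> ValueError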
243,954
rameshg87/pyremotevbox
pyremotevbox/ZSI/dispatch.py
AsCGI
def AsCGI(nsdict={}, typesmodule=None, rpc=False, modules=None): '''Dispatch within a CGI script. ''' if os.environ.get('REQUEST_METHOD') != 'POST': _CGISendFault(Fault(Fault.Client, 'Must use POST')) return ct = os.environ['CONTENT_TYPE'] try: if ct.startswith('multipart/'): cid = resolvers.MIMEResolver(ct, sys.stdin) xml = cid.GetSOAPPart() ps = ParsedSoap(xml, resolver=cid.Resolve) else: length = int(os.environ['CONTENT_LENGTH']) ps = ParsedSoap(sys.stdin.read(length)) except ParseException, e: _CGISendFault(FaultFromZSIException(e)) return _Dispatch(ps, modules, _CGISendXML, _CGISendFault, nsdict=nsdict, typesmodule=typesmodule, rpc=rpc)
python
def AsCGI(nsdict={}, typesmodule=None, rpc=False, modules=None): '''Dispatch within a CGI script. ''' if os.environ.get('REQUEST_METHOD') != 'POST': _CGISendFault(Fault(Fault.Client, 'Must use POST')) return ct = os.environ['CONTENT_TYPE'] try: if ct.startswith('multipart/'): cid = resolvers.MIMEResolver(ct, sys.stdin) xml = cid.GetSOAPPart() ps = ParsedSoap(xml, resolver=cid.Resolve) else: length = int(os.environ['CONTENT_LENGTH']) ps = ParsedSoap(sys.stdin.read(length)) except ParseException, e: _CGISendFault(FaultFromZSIException(e)) return _Dispatch(ps, modules, _CGISendXML, _CGISendFault, nsdict=nsdict, typesmodule=typesmodule, rpc=rpc)
[ "def", "AsCGI", "(", "nsdict", "=", "{", "}", ",", "typesmodule", "=", "None", ",", "rpc", "=", "False", ",", "modules", "=", "None", ")", ":", "if", "os", ".", "environ", ".", "get", "(", "'REQUEST_METHOD'", ")", "!=", "'POST'", ":", "_CGISendFault", "(", "Fault", "(", "Fault", ".", "Client", ",", "'Must use POST'", ")", ")", "return", "ct", "=", "os", ".", "environ", "[", "'CONTENT_TYPE'", "]", "try", ":", "if", "ct", ".", "startswith", "(", "'multipart/'", ")", ":", "cid", "=", "resolvers", ".", "MIMEResolver", "(", "ct", ",", "sys", ".", "stdin", ")", "xml", "=", "cid", ".", "GetSOAPPart", "(", ")", "ps", "=", "ParsedSoap", "(", "xml", ",", "resolver", "=", "cid", ".", "Resolve", ")", "else", ":", "length", "=", "int", "(", "os", ".", "environ", "[", "'CONTENT_LENGTH'", "]", ")", "ps", "=", "ParsedSoap", "(", "sys", ".", "stdin", ".", "read", "(", "length", ")", ")", "except", "ParseException", ",", "e", ":", "_CGISendFault", "(", "FaultFromZSIException", "(", "e", ")", ")", "return", "_Dispatch", "(", "ps", ",", "modules", ",", "_CGISendXML", ",", "_CGISendFault", ",", "nsdict", "=", "nsdict", ",", "typesmodule", "=", "typesmodule", ",", "rpc", "=", "rpc", ")" ]
Dispatch within a CGI script.
[ "Dispatch", "within", "a", "CGI", "script", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/dispatch.py#L248-L267
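A hedged sketch of how AsCGI above would be driven from a CGI script; MyWebService is a hypothetical module whose functions the dispatcher would resolve against, and the import path is taken from the record's path field:

#!/usr/bin/env python
# CGI entry point: the POSTed SOAP envelope is read from stdin, parsed,
# and dispatched against the functions defined in the listed modules.
import MyWebService                       # hypothetical module exposing the service operations
from pyremotevbox.ZSI.dispatch import AsCGI

AsCGI(modules=(MyWebService,))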
243,955
rameshg87/pyremotevbox
pyremotevbox/ZSI/dispatch.py
AsHandler
def AsHandler(request=None, modules=None, **kw): '''Dispatch from within ModPython.''' ps = ParsedSoap(request) kw['request'] = request _Dispatch(ps, modules, _ModPythonSendXML, _ModPythonSendFault, **kw)
python
def AsHandler(request=None, modules=None, **kw): '''Dispatch from within ModPython.''' ps = ParsedSoap(request) kw['request'] = request _Dispatch(ps, modules, _ModPythonSendXML, _ModPythonSendFault, **kw)
[ "def", "AsHandler", "(", "request", "=", "None", ",", "modules", "=", "None", ",", "*", "*", "kw", ")", ":", "ps", "=", "ParsedSoap", "(", "request", ")", "kw", "[", "'request'", "]", "=", "request", "_Dispatch", "(", "ps", ",", "modules", ",", "_ModPythonSendXML", ",", "_ModPythonSendFault", ",", "*", "*", "kw", ")" ]
Dispatch from within ModPython.
[ "Dispatch", "from", "within", "ModPython", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/dispatch.py#L269-L273
243,956
rameshg87/pyremotevbox
pyremotevbox/ZSI/dispatch.py
SOAPRequestHandler.send_xml
def send_xml(self, text, code=200): '''Send some XML. ''' self.send_response(code) if text: self.send_header('Content-type', 'text/xml; charset="%s"' %UNICODE_ENCODING) self.send_header('Content-Length', str(len(text))) self.end_headers() if text: self.wfile.write(text) self.wfile.flush()
python
def send_xml(self, text, code=200): '''Send some XML. ''' self.send_response(code) if text: self.send_header('Content-type', 'text/xml; charset="%s"' %UNICODE_ENCODING) self.send_header('Content-Length', str(len(text))) self.end_headers() if text: self.wfile.write(text) self.wfile.flush()
[ "def", "send_xml", "(", "self", ",", "text", ",", "code", "=", "200", ")", ":", "self", ".", "send_response", "(", "code", ")", "if", "text", ":", "self", ".", "send_header", "(", "'Content-type'", ",", "'text/xml; charset=\"%s\"'", "%", "UNICODE_ENCODING", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "str", "(", "len", "(", "text", ")", ")", ")", "self", ".", "end_headers", "(", ")", "if", "text", ":", "self", ".", "wfile", ".", "write", "(", "text", ")", "self", ".", "wfile", ".", "flush", "(", ")" ]
Send some XML.
[ "Send", "some", "XML", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/dispatch.py#L192-L206
243,957
rameshg87/pyremotevbox
pyremotevbox/ZSI/dispatch.py
SOAPRequestHandler.do_POST
def do_POST(self): '''The POST command. ''' try: ct = self.headers['content-type'] if ct.startswith('multipart/'): cid = resolvers.MIMEResolver(ct, self.rfile) xml = cid.GetSOAPPart() ps = ParsedSoap(xml, resolver=cid.Resolve) else: length = int(self.headers['content-length']) ps = ParsedSoap(self.rfile.read(length)) except ParseException, e: self.send_fault(FaultFromZSIException(e)) return except Exception, e: # Faulted while processing; assume it's in the header. self.send_fault(FaultFromException(e, 1, sys.exc_info()[2])) return _Dispatch(ps, self.server.modules, self.send_xml, self.send_fault, docstyle=self.server.docstyle, nsdict=self.server.nsdict, typesmodule=self.server.typesmodule, rpc=self.server.rpc)
python
def do_POST(self): '''The POST command. ''' try: ct = self.headers['content-type'] if ct.startswith('multipart/'): cid = resolvers.MIMEResolver(ct, self.rfile) xml = cid.GetSOAPPart() ps = ParsedSoap(xml, resolver=cid.Resolve) else: length = int(self.headers['content-length']) ps = ParsedSoap(self.rfile.read(length)) except ParseException, e: self.send_fault(FaultFromZSIException(e)) return except Exception, e: # Faulted while processing; assume it's in the header. self.send_fault(FaultFromException(e, 1, sys.exc_info()[2])) return _Dispatch(ps, self.server.modules, self.send_xml, self.send_fault, docstyle=self.server.docstyle, nsdict=self.server.nsdict, typesmodule=self.server.typesmodule, rpc=self.server.rpc)
[ "def", "do_POST", "(", "self", ")", ":", "try", ":", "ct", "=", "self", ".", "headers", "[", "'content-type'", "]", "if", "ct", ".", "startswith", "(", "'multipart/'", ")", ":", "cid", "=", "resolvers", ".", "MIMEResolver", "(", "ct", ",", "self", ".", "rfile", ")", "xml", "=", "cid", ".", "GetSOAPPart", "(", ")", "ps", "=", "ParsedSoap", "(", "xml", ",", "resolver", "=", "cid", ".", "Resolve", ")", "else", ":", "length", "=", "int", "(", "self", ".", "headers", "[", "'content-length'", "]", ")", "ps", "=", "ParsedSoap", "(", "self", ".", "rfile", ".", "read", "(", "length", ")", ")", "except", "ParseException", ",", "e", ":", "self", ".", "send_fault", "(", "FaultFromZSIException", "(", "e", ")", ")", "return", "except", "Exception", ",", "e", ":", "# Faulted while processing; assume it's in the header.", "self", ".", "send_fault", "(", "FaultFromException", "(", "e", ",", "1", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", ")", "return", "_Dispatch", "(", "ps", ",", "self", ".", "server", ".", "modules", ",", "self", ".", "send_xml", ",", "self", ".", "send_fault", ",", "docstyle", "=", "self", ".", "server", ".", "docstyle", ",", "nsdict", "=", "self", ".", "server", ".", "nsdict", ",", "typesmodule", "=", "self", ".", "server", ".", "typesmodule", ",", "rpc", "=", "self", ".", "server", ".", "rpc", ")" ]
The POST command.
[ "The", "POST", "command", "." ]
123dffff27da57c8faa3ac1dd4c68b1cf4558b1a
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/dispatch.py#L213-L235
243,958
narfman0/helga-markovify
helga_markovify/markov.py
punctuate
def punctuate(current_text, new_text, add_punctuation): """ Add punctuation as needed """ if add_punctuation and current_text and not current_text[-1] in string.punctuation: current_text += '. ' spacer = ' ' if not current_text or (not current_text[-1].isspace() and not new_text[0].isspace()) else '' return current_text + spacer + new_text
python
def punctuate(current_text, new_text, add_punctuation): """ Add punctuation as needed """ if add_punctuation and current_text and not current_text[-1] in string.punctuation: current_text += '. ' spacer = ' ' if not current_text or (not current_text[-1].isspace() and not new_text[0].isspace()) else '' return current_text + spacer + new_text
[ "def", "punctuate", "(", "current_text", ",", "new_text", ",", "add_punctuation", ")", ":", "if", "add_punctuation", "and", "current_text", "and", "not", "current_text", "[", "-", "1", "]", "in", "string", ".", "punctuation", ":", "current_text", "+=", "'. '", "spacer", "=", "' '", "if", "not", "current_text", "or", "(", "not", "current_text", "[", "-", "1", "]", ".", "isspace", "(", ")", "and", "not", "new_text", "[", "0", "]", ".", "isspace", "(", ")", ")", "else", "''", "return", "current_text", "+", "spacer", "+", "new_text" ]
Add punctuation as needed
[ "Add", "punctuation", "as", "needed" ]
b5a82de070102e6da1fd3f5f81cad12d0a9185d8
https://github.com/narfman0/helga-markovify/blob/b5a82de070102e6da1fd3f5f81cad12d0a9185d8/helga_markovify/markov.py#L7-L12
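A few grounded calls to punctuate above, showing the period insertion and the spacing rule:

punctuate('first entry', 'second entry', True)    # 'first entry. second entry'
punctuate('already ends.', 'next entry', True)    # 'already ends. next entry'
punctuate('', 'only entry', True)                 # ' only entry' (empty current text still gets the spacer)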
243,959
narfman0/helga-markovify
helga_markovify/markov.py
ingest
def ingest(topic, text, **kwargs): """ Ingest the given text for the topic """ if not text: raise ValueError('No text given to ingest for topic: ' + topic) data = {'topic': topic, 'text': text.strip()} data.update(kwargs) db.markovify.insert(data)
python
def ingest(topic, text, **kwargs): """ Ingest the given text for the topic """ if not text: raise ValueError('No text given to ingest for topic: ' + topic) data = {'topic': topic, 'text': text.strip()} data.update(kwargs) db.markovify.insert(data)
[ "def", "ingest", "(", "topic", ",", "text", ",", "*", "*", "kwargs", ")", ":", "if", "not", "text", ":", "raise", "ValueError", "(", "'No text given to ingest for topic: '", "+", "topic", ")", "data", "=", "{", "'topic'", ":", "topic", ",", "'text'", ":", "text", ".", "strip", "(", ")", "}", "data", ".", "update", "(", "kwargs", ")", "db", ".", "markovify", ".", "insert", "(", "data", ")" ]
Ingest the given text for the topic
[ "Ingest", "the", "given", "text", "for", "the", "topic" ]
b5a82de070102e6da1fd3f5f81cad12d0a9185d8
https://github.com/narfman0/helga-markovify/blob/b5a82de070102e6da1fd3f5f81cad12d0a9185d8/helga_markovify/markov.py#L15-L21
243,960
narfman0/helga-markovify
helga_markovify/markov.py
generate
def generate(topic, add_punctuation, character_count=None): """ Generate the text for a given topic """ corpus_cursor = db.markovify.find({'topic': topic}) if(corpus_cursor): corpus = '' for text in corpus_cursor: corpus = punctuate(corpus, text['text'], add_punctuation) text_model = markovify.Text(corpus) sentence = text_model.make_short_sentence(character_count) \ if character_count else text_model.make_sentence() if not sentence: raise Exception('There is not enough in the corpus to generate a sentence.') return sentence raise Exception('No text found for topic: ' + topic)
python
def generate(topic, add_punctuation, character_count=None): """ Generate the text for a given topic """ corpus_cursor = db.markovify.find({'topic': topic}) if(corpus_cursor): corpus = '' for text in corpus_cursor: corpus = punctuate(corpus, text['text'], add_punctuation) text_model = markovify.Text(corpus) sentence = text_model.make_short_sentence(character_count) \ if character_count else text_model.make_sentence() if not sentence: raise Exception('There is not enough in the corpus to generate a sentence.') return sentence raise Exception('No text found for topic: ' + topic)
[ "def", "generate", "(", "topic", ",", "add_punctuation", ",", "character_count", "=", "None", ")", ":", "corpus_cursor", "=", "db", ".", "markovify", ".", "find", "(", "{", "'topic'", ":", "topic", "}", ")", "if", "(", "corpus_cursor", ")", ":", "corpus", "=", "''", "for", "text", "in", "corpus_cursor", ":", "corpus", "=", "punctuate", "(", "corpus", ",", "text", "[", "'text'", "]", ",", "add_punctuation", ")", "text_model", "=", "markovify", ".", "Text", "(", "corpus", ")", "sentence", "=", "text_model", ".", "make_short_sentence", "(", "character_count", ")", "if", "character_count", "else", "text_model", ".", "make_sentence", "(", ")", "if", "not", "sentence", ":", "raise", "Exception", "(", "'There is not enough in the corpus to generate a sentence.'", ")", "return", "sentence", "raise", "Exception", "(", "'No text found for topic: '", "+", "topic", ")" ]
Generate the text for a given topic
[ "Generate", "the", "text", "for", "a", "given", "topic" ]
b5a82de070102e6da1fd3f5f81cad12d0a9185d8
https://github.com/narfman0/helga-markovify/blob/b5a82de070102e6da1fd3f5f81cad12d0a9185d8/helga_markovify/markov.py#L24-L37
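A sketch tying ingest and generate above together, assuming the module-level db handle points at a reachable MongoDB instance and that enough text has been ingested for markovify to build a model; the topic and text are illustrative:

from helga_markovify.markov import ingest, generate

ingest('greetings', 'hello there, how are you doing today')
ingest('greetings', 'hello world, it is nice to meet you')

# Build a markovify model over everything stored for the topic and emit one sentence,
# capped at 80 characters; raises if the topic is unknown or the corpus is too small.
sentence = generate('greetings', add_punctuation=True, character_count=80)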
243,961
ravenac95/lxc4u
lxc4u/api.py
default_setup
def default_setup(): """The default API setup for lxc4u This is the API that you access globally from lxc4u. """ service = LXCService lxc_types = dict(LXC=LXC, LXCWithOverlays=LXCWithOverlays, __default__=UnmanagedLXC) loader = LXCLoader(lxc_types, service) manager = LXCManager(loader, service) return LXCAPI(manager=manager, service=service)
python
def default_setup(): """The default API setup for lxc4u This is the API that you access globally from lxc4u. """ service = LXCService lxc_types = dict(LXC=LXC, LXCWithOverlays=LXCWithOverlays, __default__=UnmanagedLXC) loader = LXCLoader(lxc_types, service) manager = LXCManager(loader, service) return LXCAPI(manager=manager, service=service)
[ "def", "default_setup", "(", ")", ":", "service", "=", "LXCService", "lxc_types", "=", "dict", "(", "LXC", "=", "LXC", ",", "LXCWithOverlays", "=", "LXCWithOverlays", ",", "__default__", "=", "UnmanagedLXC", ")", "loader", "=", "LXCLoader", "(", "lxc_types", ",", "service", ")", "manager", "=", "LXCManager", "(", "loader", ",", "service", ")", "return", "LXCAPI", "(", "manager", "=", "manager", ",", "service", "=", "service", ")" ]
The default API setup for lxc4u This is the API that you access globally from lxc4u.
[ "The", "default", "API", "setup", "for", "lxc4u" ]
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/api.py#L6-L16
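A brief sketch of default_setup above; it simply wires the concrete pieces together and returns the API object the rest of lxc4u uses (import path inferred from the record's path field):

from lxc4u.api import default_setup

api = default_setup()
# api is an LXCAPI whose manager is an LXCManager built around an LXCLoader
# that knows the LXC, LXCWithOverlays and UnmanagedLXC types.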
243,962
dossier/dossier.label
dossier/label/label.py
unordered_pair_eq
def unordered_pair_eq(pair1, pair2): '''Performs pairwise unordered equality. ``pair1`` == ``pair2`` if and only if ``frozenset(pair1)`` == ``frozenset(pair2)``. ''' (x1, y1), (x2, y2) = pair1, pair2 return (x1 == x2 and y1 == y2) or (x1 == y2 and y1 == x2)
python
def unordered_pair_eq(pair1, pair2): '''Performs pairwise unordered equality. ``pair1`` == ``pair2`` if and only if ``frozenset(pair1)`` == ``frozenset(pair2)``. ''' (x1, y1), (x2, y2) = pair1, pair2 return (x1 == x2 and y1 == y2) or (x1 == y2 and y1 == x2)
[ "def", "unordered_pair_eq", "(", "pair1", ",", "pair2", ")", ":", "(", "x1", ",", "y1", ")", ",", "(", "x2", ",", "y2", ")", "=", "pair1", ",", "pair2", "return", "(", "x1", "==", "x2", "and", "y1", "==", "y2", ")", "or", "(", "x1", "==", "y2", "and", "y1", "==", "x2", ")" ]
Performs pairwise unordered equality. ``pair1`` == ``pair2`` if and only if ``frozenset(pair1)`` == ``frozenset(pair2)``.
[ "Performs", "pairwise", "unordered", "equality", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L933-L940
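Two grounded calls to unordered_pair_eq above:

unordered_pair_eq(('a', 'b'), ('b', 'a'))   # True  - same pair, order ignored
unordered_pair_eq(('a', 'b'), ('a', 'c'))   # False - second elements differ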
243,963
dossier/dossier.label
dossier/label/label.py
expand_labels
def expand_labels(labels, subtopic=False): '''Expand a set of labels that define a connected component. ``labels`` must define a *positive* connected component: it is all of the edges that make up the *single* connected component in the :class:`LabelStore`. expand will ignore subtopic assignments, and annotator_id will be an arbitrary one selected from ``labels``. Note that this function only returns the expanded labels, which is guaranteed to be disjoint with the given ``labels``. This requirement implies that ``labels`` is held in memory to ensure that no duplicates are returned. If ``subtopic`` is ``True``, then it is assumed that ``labels`` defines a ``subtopic`` connected component. In this case, subtopics are included in the expanded labels. :param labels: iterable of :class:`Label` for the connected component. :rtype: generator of expanded :class:`Label`s only ''' labels = list(labels) assert all(lab.value == CorefValue.Positive for lab in labels) # Anything to expand? if len(labels) == 0: return annotator = labels[0].annotator_id data_backed = set() connected_component = set() for label in labels: ident1, ident2 = idents_from_label(label, subtopic=subtopic) data_backed.add(normalize_pair(ident1, ident2)) connected_component.add(ident1) connected_component.add(ident2) # We do not want to rebuild the Labels we already have, # because they have true annotator_id and subtopic # fields that we may want to preserve. for ident1, ident2 in combinations(connected_component, 2): if normalize_pair(ident1, ident2) not in data_backed: (cid1, subid1), (cid2, subid2) = ident1, ident2 yield Label(cid1, cid2, annotator, CorefValue.Positive, subtopic_id1=subid1, subtopic_id2=subid2)
python
def expand_labels(labels, subtopic=False): '''Expand a set of labels that define a connected component. ``labels`` must define a *positive* connected component: it is all of the edges that make up the *single* connected component in the :class:`LabelStore`. expand will ignore subtopic assignments, and annotator_id will be an arbitrary one selected from ``labels``. Note that this function only returns the expanded labels, which is guaranteed to be disjoint with the given ``labels``. This requirement implies that ``labels`` is held in memory to ensure that no duplicates are returned. If ``subtopic`` is ``True``, then it is assumed that ``labels`` defines a ``subtopic`` connected component. In this case, subtopics are included in the expanded labels. :param labels: iterable of :class:`Label` for the connected component. :rtype: generator of expanded :class:`Label`s only ''' labels = list(labels) assert all(lab.value == CorefValue.Positive for lab in labels) # Anything to expand? if len(labels) == 0: return annotator = labels[0].annotator_id data_backed = set() connected_component = set() for label in labels: ident1, ident2 = idents_from_label(label, subtopic=subtopic) data_backed.add(normalize_pair(ident1, ident2)) connected_component.add(ident1) connected_component.add(ident2) # We do not want to rebuild the Labels we already have, # because they have true annotator_id and subtopic # fields that we may want to preserve. for ident1, ident2 in combinations(connected_component, 2): if normalize_pair(ident1, ident2) not in data_backed: (cid1, subid1), (cid2, subid2) = ident1, ident2 yield Label(cid1, cid2, annotator, CorefValue.Positive, subtopic_id1=subid1, subtopic_id2=subid2)
[ "def", "expand_labels", "(", "labels", ",", "subtopic", "=", "False", ")", ":", "labels", "=", "list", "(", "labels", ")", "assert", "all", "(", "lab", ".", "value", "==", "CorefValue", ".", "Positive", "for", "lab", "in", "labels", ")", "# Anything to expand?", "if", "len", "(", "labels", ")", "==", "0", ":", "return", "annotator", "=", "labels", "[", "0", "]", ".", "annotator_id", "data_backed", "=", "set", "(", ")", "connected_component", "=", "set", "(", ")", "for", "label", "in", "labels", ":", "ident1", ",", "ident2", "=", "idents_from_label", "(", "label", ",", "subtopic", "=", "subtopic", ")", "data_backed", ".", "add", "(", "normalize_pair", "(", "ident1", ",", "ident2", ")", ")", "connected_component", ".", "add", "(", "ident1", ")", "connected_component", ".", "add", "(", "ident2", ")", "# We do not want to rebuild the Labels we already have,", "# because they have true annotator_id and subtopic", "# fields that we may want to preserve.", "for", "ident1", ",", "ident2", "in", "combinations", "(", "connected_component", ",", "2", ")", ":", "if", "normalize_pair", "(", "ident1", ",", "ident2", ")", "not", "in", "data_backed", ":", "(", "cid1", ",", "subid1", ")", ",", "(", "cid2", ",", "subid2", ")", "=", "ident1", ",", "ident2", "yield", "Label", "(", "cid1", ",", "cid2", ",", "annotator", ",", "CorefValue", ".", "Positive", ",", "subtopic_id1", "=", "subid1", ",", "subtopic_id2", "=", "subid2", ")" ]
Expand a set of labels that define a connected component. ``labels`` must define a *positive* connected component: it is all of the edges that make up the *single* connected component in the :class:`LabelStore`. expand will ignore subtopic assignments, and annotator_id will be an arbitrary one selected from ``labels``. Note that this function only returns the expanded labels, which is guaranteed to be disjoint with the given ``labels``. This requirement implies that ``labels`` is held in memory to ensure that no duplicates are returned. If ``subtopic`` is ``True``, then it is assumed that ``labels`` defines a ``subtopic`` connected component. In this case, subtopics are included in the expanded labels. :param labels: iterable of :class:`Label` for the connected component. :rtype: generator of expanded :class:`Label`s only
[ "Expand", "a", "set", "of", "labels", "that", "define", "a", "connected", "component", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L955-L999
243,964
dossier/dossier.label
dossier/label/label.py
normalize_ident
def normalize_ident(ident): '''Splits a generic identifier. If ``ident`` is a tuple, then ``(ident[0], ident[1])`` is returned. Otherwise, ``(ident[0], None)`` is returned. ''' if isinstance(ident, tuple) and len(ident) == 2: return ident[0], ident[1] # content_id, subtopic_id else: return ident, None
python
def normalize_ident(ident): '''Splits a generic identifier. If ``ident`` is a tuple, then ``(ident[0], ident[1])`` is returned. Otherwise, ``(ident[0], None)`` is returned. ''' if isinstance(ident, tuple) and len(ident) == 2: return ident[0], ident[1] # content_id, subtopic_id else: return ident, None
[ "def", "normalize_ident", "(", "ident", ")", ":", "if", "isinstance", "(", "ident", ",", "tuple", ")", "and", "len", "(", "ident", ")", "==", "2", ":", "return", "ident", "[", "0", "]", ",", "ident", "[", "1", "]", "# content_id, subtopic_id", "else", ":", "return", "ident", ",", "None" ]
Splits a generic identifier. If ``ident`` is a tuple, then ``(ident[0], ident[1])`` is returned. Otherwise, ``(ident[0], None)`` is returned.
[ "Splits", "a", "generic", "identifier", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L1002-L1011
243,965
dossier/dossier.label
dossier/label/label.py
idents_from_label
def idents_from_label(lab, subtopic=False): '''Returns the "ident" of a label. If ``subtopic`` is ``True``, then a pair of pairs is returned, where each pair corresponds to the content id and subtopic id in the given label. Otherwise, a pair of pairs is returned, but the second element of each pair is always ``None``. This is a helper function that is useful for dealing with generic label identifiers. ''' if not subtopic: return (lab.content_id1, None), (lab.content_id2, None) else: return ( (lab.content_id1, lab.subtopic_id1), (lab.content_id2, lab.subtopic_id2), )
python
def idents_from_label(lab, subtopic=False): '''Returns the "ident" of a label. If ``subtopic`` is ``True``, then a pair of pairs is returned, where each pair corresponds to the content id and subtopic id in the given label. Otherwise, a pair of pairs is returned, but the second element of each pair is always ``None``. This is a helper function that is useful for dealing with generic label identifiers. ''' if not subtopic: return (lab.content_id1, None), (lab.content_id2, None) else: return ( (lab.content_id1, lab.subtopic_id1), (lab.content_id2, lab.subtopic_id2), )
[ "def", "idents_from_label", "(", "lab", ",", "subtopic", "=", "False", ")", ":", "if", "not", "subtopic", ":", "return", "(", "lab", ".", "content_id1", ",", "None", ")", ",", "(", "lab", ".", "content_id2", ",", "None", ")", "else", ":", "return", "(", "(", "lab", ".", "content_id1", ",", "lab", ".", "subtopic_id1", ")", ",", "(", "lab", ".", "content_id2", ",", "lab", ".", "subtopic_id2", ")", ",", ")" ]
Returns the "ident" of a label. If ``subtopic`` is ``True``, then a pair of pairs is returned, where each pair corresponds to the content id and subtopic id in the given label. Otherwise, a pair of pairs is returned, but the second element of each pair is always ``None``. This is a helper function that is useful for dealing with generic label identifiers.
[ "Returns", "the", "ident", "of", "a", "label", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L1018-L1037
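Grounded calls to the two ident helpers above (normalize_ident and idents_from_label); the import path is inferred from the record's path field and the content/subtopic ids are illustrative:

from dossier.label.label import Label, CorefValue, normalize_ident, idents_from_label

normalize_ident('doc1')                      # ('doc1', None)
normalize_ident(('doc1', 'sub3'))            # ('doc1', 'sub3')

lab = Label('doc1', 'doc2', 'annotator', CorefValue.Positive,
            subtopic_id1='sub1', subtopic_id2='sub2')
idents_from_label(lab)                       # (('doc1', None), ('doc2', None))
idents_from_label(lab, subtopic=True)        # (('doc1', 'sub1'), ('doc2', 'sub2'))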
243,966
dossier/dossier.label
dossier/label/label.py
LabelStore.put
def put(self, *labels): '''Add a new label to the store. :param label: label :type label: :class:`Label` ''' puts = [] for label in labels: k1, k2 = self._keys_from_label(label) v = self._value_from_label(label) puts.append((k1, v)) puts.append((k2, v)) self.kvl.put(self.TABLE, *puts)
python
def put(self, *labels): '''Add a new label to the store. :param label: label :type label: :class:`Label` ''' puts = [] for label in labels: k1, k2 = self._keys_from_label(label) v = self._value_from_label(label) puts.append((k1, v)) puts.append((k2, v)) self.kvl.put(self.TABLE, *puts)
[ "def", "put", "(", "self", ",", "*", "labels", ")", ":", "puts", "=", "[", "]", "for", "label", "in", "labels", ":", "k1", ",", "k2", "=", "self", ".", "_keys_from_label", "(", "label", ")", "v", "=", "self", ".", "_value_from_label", "(", "label", ")", "puts", ".", "append", "(", "(", "k1", ",", "v", ")", ")", "puts", ".", "append", "(", "(", "k2", ",", "v", ")", ")", "self", ".", "kvl", ".", "put", "(", "self", ".", "TABLE", ",", "*", "puts", ")" ]
Add a new label to the store. :param label: label :type label: :class:`Label`
[ "Add", "a", "new", "label", "to", "the", "store", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L545-L557
243,967
dossier/dossier.label
dossier/label/label.py
LabelStore.directly_connected
def directly_connected(self, ident): '''Return a generator of labels connected to ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. If no labels are defined for ``ident``, then the generator will yield no labels. Note that this only returns *directly* connected labels. It will not follow transitive relationships. :param ident: content id or (content id and subtopic id) :type ident: ``str`` or ``(str, str)`` :rtype: generator of :class:`Label` ''' content_id, subtopic_id = normalize_ident(ident) return self.everything(include_deleted=False, content_id=content_id, subtopic_id=subtopic_id)
python
def directly_connected(self, ident): '''Return a generator of labels connected to ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. If no labels are defined for ``ident``, then the generator will yield no labels. Note that this only returns *directly* connected labels. It will not follow transitive relationships. :param ident: content id or (content id and subtopic id) :type ident: ``str`` or ``(str, str)`` :rtype: generator of :class:`Label` ''' content_id, subtopic_id = normalize_ident(ident) return self.everything(include_deleted=False, content_id=content_id, subtopic_id=subtopic_id)
[ "def", "directly_connected", "(", "self", ",", "ident", ")", ":", "content_id", ",", "subtopic_id", "=", "normalize_ident", "(", "ident", ")", "return", "self", ".", "everything", "(", "include_deleted", "=", "False", ",", "content_id", "=", "content_id", ",", "subtopic_id", "=", "subtopic_id", ")" ]
Return a generator of labels connected to ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. If no labels are defined for ``ident``, then the generator will yield no labels. Note that this only returns *directly* connected labels. It will not follow transitive relationships. :param ident: content id or (content id and subtopic id) :type ident: ``str`` or ``(str, str)`` :rtype: generator of :class:`Label`
[ "Return", "a", "generator", "of", "labels", "connected", "to", "ident", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L634-L653
243,968
dossier/dossier.label
dossier/label/label.py
LabelStore.split_by_connected_component
def split_by_connected_component(self, idents): '''Split idents into equivalence classes based on connected components. ''' idents_remaining = set(idents) connected_components = [] for ident in idents: if ident not in idents_remaining: continue idents_remaining.remove(ident) connected_component = [ident] for label in self.connected_component(ident): cids = label.content_id1, label.content_id2 for cid in cids: if cid in idents_remaining: connected_component.append(cid) idents_remaining.remove(cid) connected_components.append(sorted(connected_component)) return connected_components
python
def split_by_connected_component(self, idents): '''Split idents into equivalence classes based on connected components. ''' idents_remaining = set(idents) connected_components = [] for ident in idents: if ident not in idents_remaining: continue idents_remaining.remove(ident) connected_component = [ident] for label in self.connected_component(ident): cids = label.content_id1, label.content_id2 for cid in cids: if cid in idents_remaining: connected_component.append(cid) idents_remaining.remove(cid) connected_components.append(sorted(connected_component)) return connected_components
[ "def", "split_by_connected_component", "(", "self", ",", "idents", ")", ":", "idents_remaining", "=", "set", "(", "idents", ")", "connected_components", "=", "[", "]", "for", "ident", "in", "idents", ":", "if", "ident", "not", "in", "idents_remaining", ":", "continue", "idents_remaining", ".", "remove", "(", "ident", ")", "connected_component", "=", "[", "ident", "]", "for", "label", "in", "self", ".", "connected_component", "(", "ident", ")", ":", "cids", "=", "label", ".", "content_id1", ",", "label", ".", "content_id2", "for", "cid", "in", "cids", ":", "if", "cid", "in", "idents_remaining", ":", "connected_component", ".", "append", "(", "cid", ")", "idents_remaining", ".", "remove", "(", "cid", ")", "connected_components", ".", "append", "(", "sorted", "(", "connected_component", ")", ")", "return", "connected_components" ]
Split idents into equivalence classes based on connected components.
[ "Split", "idents", "into", "equivalence", "classes", "based", "on", "connected", "components", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L655-L676
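A sketch of split_by_connected_component above, assuming `store` is an already-populated LabelStore holding positive labels a-b and b-c and nothing touching z; the ids are illustrative:

store.split_by_connected_component(['a', 'b', 'c', 'z'])
# -> [['a', 'b', 'c'], ['z']]
# 'a', 'b' and 'c' collapse into one class via the a-b and b-c labels; 'z' stands alone.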
243,969
dossier/dossier.label
dossier/label/label.py
LabelStore.connected_component
def connected_component(self, ident): '''Return a connected component generator for ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. Given an ``ident``, return the corresponding connected component by following all positive transitivity relationships. For example, if ``(a, b, 1)`` is a label and ``(b, c, 1)`` is a label, then ``connected_component('a')`` will return both labels even though ``a`` and ``c`` are not directly connected. (Note that even though this returns a generator, it will still consume memory proportional to the number of labels in the connected component.) :param ident: content id or (content id and subtopic id) :type ident: ``str`` or ``(str, str)`` :rtype: generator of :class:`Label` ''' ident = normalize_ident(ident) done = set() # set of cids that we've queried with todo = set([ident]) # set of cids to do a query for labels = set() while todo: ident = todo.pop() done.add(ident) for label in self.directly_connected(ident): if label.value != CorefValue.Positive: continue ident1, ident2 = idents_from_label( label, subtopic=ident_has_subtopic(ident)) if ident1 not in done: todo.add(ident1) if ident2 not in done: todo.add(ident2) if label not in labels: labels.add(label) yield label
python
def connected_component(self, ident): '''Return a connected component generator for ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. Given an ``ident``, return the corresponding connected component by following all positive transitivity relationships. For example, if ``(a, b, 1)`` is a label and ``(b, c, 1)`` is a label, then ``connected_component('a')`` will return both labels even though ``a`` and ``c`` are not directly connected. (Note that even though this returns a generator, it will still consume memory proportional to the number of labels in the connected component.) :param ident: content id or (content id and subtopic id) :type ident: ``str`` or ``(str, str)`` :rtype: generator of :class:`Label` ''' ident = normalize_ident(ident) done = set() # set of cids that we've queried with todo = set([ident]) # set of cids to do a query for labels = set() while todo: ident = todo.pop() done.add(ident) for label in self.directly_connected(ident): if label.value != CorefValue.Positive: continue ident1, ident2 = idents_from_label( label, subtopic=ident_has_subtopic(ident)) if ident1 not in done: todo.add(ident1) if ident2 not in done: todo.add(ident2) if label not in labels: labels.add(label) yield label
[ "def", "connected_component", "(", "self", ",", "ident", ")", ":", "ident", "=", "normalize_ident", "(", "ident", ")", "done", "=", "set", "(", ")", "# set of cids that we've queried with", "todo", "=", "set", "(", "[", "ident", "]", ")", "# set of cids to do a query for", "labels", "=", "set", "(", ")", "while", "todo", ":", "ident", "=", "todo", ".", "pop", "(", ")", "done", ".", "add", "(", "ident", ")", "for", "label", "in", "self", ".", "directly_connected", "(", "ident", ")", ":", "if", "label", ".", "value", "!=", "CorefValue", ".", "Positive", ":", "continue", "ident1", ",", "ident2", "=", "idents_from_label", "(", "label", ",", "subtopic", "=", "ident_has_subtopic", "(", "ident", ")", ")", "if", "ident1", "not", "in", "done", ":", "todo", ".", "add", "(", "ident1", ")", "if", "ident2", "not", "in", "done", ":", "todo", ".", "add", "(", "ident2", ")", "if", "label", "not", "in", "labels", ":", "labels", ".", "add", "(", "label", ")", "yield", "label" ]
Return a connected component generator for ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. Given an ``ident``, return the corresponding connected component by following all positive transitivity relationships. For example, if ``(a, b, 1)`` is a label and ``(b, c, 1)`` is a label, then ``connected_component('a')`` will return both labels even though ``a`` and ``c`` are not directly connected. (Note that even though this returns a generator, it will still consume memory proportional to the number of labels in the connected component.) :param ident: content id or (content id and subtopic id) :type ident: ``str`` or ``(str, str)`` :rtype: generator of :class:`Label`
[ "Return", "a", "connected", "component", "generator", "for", "ident", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L678-L718
243,970
dossier/dossier.label
dossier/label/label.py
LabelStore.expand
def expand(self, ident): '''Return expanded set of labels from a connected component. The connected component is derived from ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. If ``ident`` identifies a subtopic, then expansion is done on a subtopic connected component (and expanded labels retain subtopic information). The labels returned by :meth:`LabelStore.connected_component` contains only the :class:`Label` stored in the :class:`LabelStore`, and does not include the labels you can infer from the connected component. This method returns both the data-backed labels and the inferred labels. Subtopic assignments of the expanded labels will be empty. The ``annotator_id`` will be an arbitrary ``annotator_id`` within the connected component. :param str content_id: content id :param value: coreferent value :type value: :class:`CorefValue` :rtype: ``list`` of :class:`Label` ''' subtopic = ident_has_subtopic(normalize_ident(ident)) labels = list(self.connected_component(ident)) labels.extend(expand_labels(labels, subtopic=subtopic)) return labels
python
def expand(self, ident): '''Return expanded set of labels from a connected component. The connected component is derived from ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. If ``ident`` identifies a subtopic, then expansion is done on a subtopic connected component (and expanded labels retain subtopic information). The labels returned by :meth:`LabelStore.connected_component` contains only the :class:`Label` stored in the :class:`LabelStore`, and does not include the labels you can infer from the connected component. This method returns both the data-backed labels and the inferred labels. Subtopic assignments of the expanded labels will be empty. The ``annotator_id`` will be an arbitrary ``annotator_id`` within the connected component. :param str content_id: content id :param value: coreferent value :type value: :class:`CorefValue` :rtype: ``list`` of :class:`Label` ''' subtopic = ident_has_subtopic(normalize_ident(ident)) labels = list(self.connected_component(ident)) labels.extend(expand_labels(labels, subtopic=subtopic)) return labels
[ "def", "expand", "(", "self", ",", "ident", ")", ":", "subtopic", "=", "ident_has_subtopic", "(", "normalize_ident", "(", "ident", ")", ")", "labels", "=", "list", "(", "self", ".", "connected_component", "(", "ident", ")", ")", "labels", ".", "extend", "(", "expand_labels", "(", "labels", ",", "subtopic", "=", "subtopic", ")", ")", "return", "labels" ]
Return expanded set of labels from a connected component. The connected component is derived from ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. If ``ident`` identifies a subtopic, then expansion is done on a subtopic connected component (and expanded labels retain subtopic information). The labels returned by :meth:`LabelStore.connected_component` contains only the :class:`Label` stored in the :class:`LabelStore`, and does not include the labels you can infer from the connected component. This method returns both the data-backed labels and the inferred labels. Subtopic assignments of the expanded labels will be empty. The ``annotator_id`` will be an arbitrary ``annotator_id`` within the connected component. :param str content_id: content id :param value: coreferent value :type value: :class:`CorefValue` :rtype: ``list`` of :class:`Label`
[ "Return", "expanded", "set", "of", "labels", "from", "a", "connected", "component", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L720-L747
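A sketch showing how directly_connected, connected_component and expand above relate, assuming `store` is an already-initialised LabelStore (its kvlayer backing is not shown in these records) and the content ids are illustrative:

a_b = Label('a', 'b', 'annotator', CorefValue.Positive)
b_c = Label('b', 'c', 'annotator', CorefValue.Positive)
store.put(a_b, b_c)

list(store.directly_connected('a'))     # [a_b] only: no transitivity
list(store.connected_component('a'))    # [a_b, b_c]: follows positive edges transitively
store.expand('a')                       # the component plus the inferred a<->c label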
243,971
dossier/dossier.label
dossier/label/label.py
LabelStore.negative_inference
def negative_inference(self, content_id): '''Return a generator of inferred negative label relationships centered on ``content_id``. Negative labels are inferred by getting all other content ids connected to ``content_id`` through a negative label, then running :meth:`LabelStore.negative_label_inference` on those labels. See :meth:`LabelStore.negative_label_inference` for more information. ''' neg_labels = ifilter(lambda l: l.value == CorefValue.Negative, self.directly_connected(content_id)) for label in neg_labels: label_inf = self.negative_label_inference(label) for label in label_inf: yield label
python
def negative_inference(self, content_id): '''Return a generator of inferred negative label relationships centered on ``content_id``. Negative labels are inferred by getting all other content ids connected to ``content_id`` through a negative label, then running :meth:`LabelStore.negative_label_inference` on those labels. See :meth:`LabelStore.negative_label_inference` for more information. ''' neg_labels = ifilter(lambda l: l.value == CorefValue.Negative, self.directly_connected(content_id)) for label in neg_labels: label_inf = self.negative_label_inference(label) for label in label_inf: yield label
[ "def", "negative_inference", "(", "self", ",", "content_id", ")", ":", "neg_labels", "=", "ifilter", "(", "lambda", "l", ":", "l", ".", "value", "==", "CorefValue", ".", "Negative", ",", "self", ".", "directly_connected", "(", "content_id", ")", ")", "for", "label", "in", "neg_labels", ":", "label_inf", "=", "self", ".", "negative_label_inference", "(", "label", ")", "for", "label", "in", "label_inf", ":", "yield", "label" ]
Return a generator of inferred negative label relationships centered on ``content_id``. Negative labels are inferred by getting all other content ids connected to ``content_id`` through a negative label, then running :meth:`LabelStore.negative_label_inference` on those labels. See :meth:`LabelStore.negative_label_inference` for more information.
[ "Return", "a", "generator", "of", "inferred", "negative", "label", "relationships", "centered", "on", "content_id", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L749-L764
243,972
dossier/dossier.label
dossier/label/label.py
LabelStore.negative_label_inference
def negative_label_inference(self, label): '''Return a generator of inferred negative label relationships. Construct ad-hoc negative labels between ``label.content_id1`` and the positive connected component of ``label.content_id2``, and ``label.content_id2`` to the connected component of ``label.content_id1``. Note this will allocate memory proportional to the size of the connected components of ``label.content_id1`` and ``label.content_id2``. ''' assert label.value == CorefValue.Negative yield label cid2_comp = self.connected_component(label.content_id2) for comp_label in cid2_comp: comp_cids = (comp_label.content_id1, comp_label.content_id2) for comp_cid in comp_cids: if not comp_cid == label.content_id2: yield Label(label.content_id1, comp_cid, 'auto', CorefValue.Negative) cid1_comp = self.connected_component(label.content_id1) for comp_label in cid1_comp: comp_cids = (comp_label.content_id1, comp_label.content_id2) for comp_cid in comp_cids: if not comp_cid == label.content_id1: yield Label(label.content_id2, comp_cid, 'auto', CorefValue.Negative)
python
def negative_label_inference(self, label): '''Return a generator of inferred negative label relationships. Construct ad-hoc negative labels between ``label.content_id1`` and the positive connected component of ``label.content_id2``, and ``label.content_id2`` to the connected component of ``label.content_id1``. Note this will allocate memory proportional to the size of the connected components of ``label.content_id1`` and ``label.content_id2``. ''' assert label.value == CorefValue.Negative yield label cid2_comp = self.connected_component(label.content_id2) for comp_label in cid2_comp: comp_cids = (comp_label.content_id1, comp_label.content_id2) for comp_cid in comp_cids: if not comp_cid == label.content_id2: yield Label(label.content_id1, comp_cid, 'auto', CorefValue.Negative) cid1_comp = self.connected_component(label.content_id1) for comp_label in cid1_comp: comp_cids = (comp_label.content_id1, comp_label.content_id2) for comp_cid in comp_cids: if not comp_cid == label.content_id1: yield Label(label.content_id2, comp_cid, 'auto', CorefValue.Negative)
[ "def", "negative_label_inference", "(", "self", ",", "label", ")", ":", "assert", "label", ".", "value", "==", "CorefValue", ".", "Negative", "yield", "label", "cid2_comp", "=", "self", ".", "connected_component", "(", "label", ".", "content_id2", ")", "for", "comp_label", "in", "cid2_comp", ":", "comp_cids", "=", "(", "comp_label", ".", "content_id1", ",", "comp_label", ".", "content_id2", ")", "for", "comp_cid", "in", "comp_cids", ":", "if", "not", "comp_cid", "==", "label", ".", "content_id2", ":", "yield", "Label", "(", "label", ".", "content_id1", ",", "comp_cid", ",", "'auto'", ",", "CorefValue", ".", "Negative", ")", "cid1_comp", "=", "self", ".", "connected_component", "(", "label", ".", "content_id1", ")", "for", "comp_label", "in", "cid1_comp", ":", "comp_cids", "=", "(", "comp_label", ".", "content_id1", ",", "comp_label", ".", "content_id2", ")", "for", "comp_cid", "in", "comp_cids", ":", "if", "not", "comp_cid", "==", "label", ".", "content_id1", ":", "yield", "Label", "(", "label", ".", "content_id2", ",", "comp_cid", ",", "'auto'", ",", "CorefValue", ".", "Negative", ")" ]
Return a generator of inferred negative label relationships. Construct ad-hoc negative labels between ``label.content_id1`` and the positive connected component of ``label.content_id2``, and ``label.content_id2`` to the connected component of ``label.content_id1``. Note this will allocate memory proportional to the size of the connected components of ``label.content_id1`` and ``label.content_id2``.
[ "Return", "a", "generator", "of", "inferred", "negative", "label", "relationships", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L766-L798
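A minimal usage sketch for the record above (not part of the dataset row): it assumes a LabelStore instance is passed in, that Label and CorefValue are importable from dossier.label, and the content IDs and annotator name are made up.

from dossier.label import Label, CorefValue  # import path assumed

def print_inferred_negatives(store, cid1, cid2):
    # Seed negative label; the generator yields it first, then ad-hoc
    # negatives against each side's positive connected component.
    seed = Label(cid1, cid2, 'annotator-1', CorefValue.Negative)
    for lab in store.negative_label_inference(seed):
        print(lab.content_id1, lab.content_id2, lab.value)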
243,973
dossier/dossier.label
dossier/label/label.py
LabelStore._filter_keys
def _filter_keys(self, content_id=None, prefix=None, subtopic_id=None): '''Filter out-of-order labels by key tuple. :class:`Label` always sorts by `(cid1,cid2,sid1,sid2)`, but for efficient lookups on `cid2` this class also stores in order `(cid2,cid1,sid2,sid1)`. Filter out things that are in the wrong order. But, if an original query specified `content_id` or `subtopic_id`, account for the possibility that something might be apparently out-of-order but its dual will not be in the query at all. ''' def accept(kvp): (content_id1, content_id2, subtopic_id1, subtopic_id2, annotator_id, inverted_epoch_ticks) = kvp[0] # In general we'll accept the label if # it's the natural order; l.content_id1 == cid1 is_sorted = (content_id1 < content_id2 or (content_id1 == content_id2 and subtopic_id1 <= subtopic_id2)) if content_id is not None: # We will only see tuples where content_id1 is the # requested content ID assert content_id1 == content_id elif prefix is not None: assert content_id1.startswith(prefix) # If we'll see both orderings of this record, only # accept the sorted one if content_id2.startswith(prefix) and not is_sorted: return False elif not is_sorted: # We'll see both orderings of everything, reject the # unsorted ones return False # If we're not looking for subtopic IDs, then accept records # matching the content ID that are in natural order if subtopic_id is None: if content_id2 != content_id: return True # will never see its dual return subtopic_id1 <= subtopic_id2 # The scan range doesn't include subtopic IDs (the key schema # is oriented towards querying content-to-content labels) # so we need to accept records where either part matches # (if both parts match then the record is its own dual) if subtopic_id == subtopic_id1: return True if content_id == content_id2 and subtopic_id == subtopic_id2: return True return False return accept
python
def _filter_keys(self, content_id=None, prefix=None, subtopic_id=None): '''Filter out-of-order labels by key tuple. :class:`Label` always sorts by `(cid1,cid2,sid1,sid2)`, but for efficient lookups on `cid2` this class also stores in order `(cid2,cid1,sid2,sid1)`. Filter out things that are in the wrong order. But, if an original query specified `content_id` or `subtopic_id`, account for the possibility that something might be apparently out-of-order but its dual will not be in the query at all. ''' def accept(kvp): (content_id1, content_id2, subtopic_id1, subtopic_id2, annotator_id, inverted_epoch_ticks) = kvp[0] # In general we'll accept the label if # it's the natural order; l.content_id1 == cid1 is_sorted = (content_id1 < content_id2 or (content_id1 == content_id2 and subtopic_id1 <= subtopic_id2)) if content_id is not None: # We will only see tuples where content_id1 is the # requested content ID assert content_id1 == content_id elif prefix is not None: assert content_id1.startswith(prefix) # If we'll see both orderings of this record, only # accept the sorted one if content_id2.startswith(prefix) and not is_sorted: return False elif not is_sorted: # We'll see both orderings of everything, reject the # unsorted ones return False # If we're not looking for subtopic IDs, then accept records # matching the content ID that are in natural order if subtopic_id is None: if content_id2 != content_id: return True # will never see its dual return subtopic_id1 <= subtopic_id2 # The scan range doesn't include subtopic IDs (the key schema # is oriented towards querying content-to-content labels) # so we need to accept records where either part matches # (if both parts match then the record is its own dual) if subtopic_id == subtopic_id1: return True if content_id == content_id2 and subtopic_id == subtopic_id2: return True return False return accept
[ "def", "_filter_keys", "(", "self", ",", "content_id", "=", "None", ",", "prefix", "=", "None", ",", "subtopic_id", "=", "None", ")", ":", "def", "accept", "(", "kvp", ")", ":", "(", "content_id1", ",", "content_id2", ",", "subtopic_id1", ",", "subtopic_id2", ",", "annotator_id", ",", "inverted_epoch_ticks", ")", "=", "kvp", "[", "0", "]", "# In general we'll accept the label if", "# it's the natural order; l.content_id1 == cid1", "is_sorted", "=", "(", "content_id1", "<", "content_id2", "or", "(", "content_id1", "==", "content_id2", "and", "subtopic_id1", "<=", "subtopic_id2", ")", ")", "if", "content_id", "is", "not", "None", ":", "# We will only see tuples where content_id1 is the", "# requested content ID", "assert", "content_id1", "==", "content_id", "elif", "prefix", "is", "not", "None", ":", "assert", "content_id1", ".", "startswith", "(", "prefix", ")", "# If we'll see both orderings of this record, only", "# accept the sorted one", "if", "content_id2", ".", "startswith", "(", "prefix", ")", "and", "not", "is_sorted", ":", "return", "False", "elif", "not", "is_sorted", ":", "# We'll see both orderings of everything, reject the", "# unsorted ones", "return", "False", "# If we're not looking for subtopic IDs, then accept records", "# matching the content ID that are in natural order", "if", "subtopic_id", "is", "None", ":", "if", "content_id2", "!=", "content_id", ":", "return", "True", "# will never see its dual", "return", "subtopic_id1", "<=", "subtopic_id2", "# The scan range doesn't include subtopic IDs (the key schema", "# is oriented towards querying content-to-content labels)", "# so we need to accept records where either part matches", "# (if both parts match then the record is its own dual)", "if", "subtopic_id", "==", "subtopic_id1", ":", "return", "True", "if", "content_id", "==", "content_id2", "and", "subtopic_id", "==", "subtopic_id2", ":", "return", "True", "return", "False", "return", "accept" ]
Filter out-of-order labels by key tuple. :class:`Label` always sorts by `(cid1,cid2,sid1,sid2)`, but for efficient lookups on `cid2` this class also stores in order `(cid2,cid1,sid2,sid1)`. Filter out things that are in the wrong order. But, if an original query specified `content_id` or `subtopic_id`, account for the possibility that something might be apparently out-of-order but its dual will not be in the query at all.
[ "Filter", "out", "-", "of", "-", "order", "labels", "by", "key", "tuple", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L800-L850
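A toy illustration (not from the record) of the ordering test that _filter_keys applies: every label is stored under both key orders, and only the naturally sorted copy is kept when both copies would fall inside the scan.

def naturally_sorted(key):
    cid1, cid2, sid1, sid2 = key[:4]
    return cid1 < cid2 or (cid1 == cid2 and sid1 <= sid2)

keys = [
    ('a', 'b', 's1', 's2', 'ann', 0),  # natural order -> kept
    ('b', 'a', 's2', 's1', 'ann', 0),  # dual copy of the same label -> dropped
]
print([k for k in keys if naturally_sorted(k)])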
243,974
dossier/dossier.label
dossier/label/label.py
LabelStore.apply_diff
def apply_diff(self, diff): '''Applies a diff to the label table. A ``diff`` is a dictionary with three keys: ``add``, ``delete`` and ``change``. Each key should map to a list of labels. ``add`` corresponds to the labels that are in ``new`` but not in ``old``. ``delete`` corresponds to the labels that are in ``old`` but not in ``new``. ``change`` corresponds to the labels that are in both ``old`` and ``new`` but have different coref/rating values. ''' # add and change are easy---just put the labels. # delete is a little trickier. We need to scrub out the impact of # the previous label without actually deleting it. For this, we # use an unknown coref value. insert = ( diff['add'] + diff['change'] + [lab.update(value=CorefValue.Unknown) for lab in diff['delete']] ) self.put(*insert)
python
def apply_diff(self, diff): '''Applies a diff to the label table. A ``diff`` is a dictionary with three keys: ``add``, ``delete`` and ``change``. Each key should map to a list of labels. ``add`` corresponds to the labels that are in ``new`` but not in ``old``. ``delete`` corresponds to the labels that are in ``old`` but not in ``new``. ``change`` corresponds to the labels that are in both ``old`` and ``new`` but have different coref/rating values. ''' # add and change are easy---just put the labels. # delete is a little trickier. We need to scrub out the impact of # the previous label without actually deleting it. For this, we # use an unknown coref value. insert = ( diff['add'] + diff['change'] + [lab.update(value=CorefValue.Unknown) for lab in diff['delete']] ) self.put(*insert)
[ "def", "apply_diff", "(", "self", ",", "diff", ")", ":", "# add and change are easy---just put the labels.", "# delete is a little trickier. We need to scrub out the impact of", "# the previous label without actually deleting it. For this, we", "# use an unknown coref value.", "insert", "=", "(", "diff", "[", "'add'", "]", "+", "diff", "[", "'change'", "]", "+", "[", "lab", ".", "update", "(", "value", "=", "CorefValue", ".", "Unknown", ")", "for", "lab", "in", "diff", "[", "'delete'", "]", "]", ")", "self", ".", "put", "(", "*", "insert", ")" ]
Applies a diff to the label table. A ``diff`` is a dictionary with three keys: ``add``, ``delete`` and ``change``. Each key should map to a list of labels. ``add`` corresponds to the labels that are in ``new`` but not in ``old``. ``delete`` corresponds to the labels that are in ``old`` but not in ``new``. ``change`` corresponds to the labels that are in both ``old`` and ``new`` but have different coref/rating values.
[ "Applies", "a", "diff", "to", "the", "label", "table", "." ]
d445e56b02ffd91ad46b0872cfbff62b9afef7ec
https://github.com/dossier/dossier.label/blob/d445e56b02ffd91ad46b0872cfbff62b9afef7ec/dossier/label/label.py#L905-L930
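A short sketch of the diff shape apply_diff expects, assuming lists of Label objects as in the earlier records; per the comments in the code, deleted labels are not removed but re-put with CorefValue.Unknown.

def sync_labels(store, added, changed, deleted):
    # added/changed/deleted are lists of Label objects (hypothetical inputs).
    store.apply_diff({
        'add': list(added),
        'change': list(changed),
        'delete': list(deleted),  # internally rewritten with CorefValue.Unknown
    })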
243,975
callowayproject/Transmogrify
transmogrify/filesystem/__init__.py
file_exists
def file_exists(original_file): """ Check to make sure the original file exists """ if original_file.startswith("s3://"): from filesystem import s3 return s3.file_exists(original_file) else: if not os.path.exists(original_file): return False if not os.path.isfile(original_file): return False return True
python
def file_exists(original_file): """ Check to make sure the original file exists """ if original_file.startswith("s3://"): from filesystem import s3 return s3.file_exists(original_file) else: if not os.path.exists(original_file): return False if not os.path.isfile(original_file): return False return True
[ "def", "file_exists", "(", "original_file", ")", ":", "if", "original_file", ".", "startswith", "(", "\"s3://\"", ")", ":", "from", "filesystem", "import", "s3", "return", "s3", ".", "file_exists", "(", "original_file", ")", "else", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "original_file", ")", ":", "return", "False", "if", "not", "os", ".", "path", ".", "isfile", "(", "original_file", ")", ":", "return", "False", "return", "True" ]
Check to make sure the original file exists
[ "Check", "to", "make", "sure", "the", "original", "file", "exists" ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/filesystem/__init__.py#L29-L41
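Usage sketch for file_exists (the paths are made up); the same call covers both local paths and s3:// URIs.

if file_exists('s3://example-bucket/originals/photo.jpg'):
    print('found on S3')
if file_exists('/var/media/originals/photo.jpg'):
    print('found locally')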
243,976
scottgigante/tasklogger
tasklogger/logger.py
TaskLogger.complete_task
def complete_task(self, task): """Complete logging of a task Returns the time lapsed since `start_task` was called Parameters ---------- task : str Name of the task to be started Returns ------- time : float The time lapsed between task start and completion """ try: runtime = self.timer() - self.tasks[task] del self.tasks[task] if runtime >= self.min_runtime: self.info("Calculated {} in {:.2f} seconds.".format( task, runtime)) return runtime except KeyError: self.info("Calculated {}.".format(task))
python
def complete_task(self, task): """Complete logging of a task Returns the time lapsed since `start_task` was called Parameters ---------- task : str Name of the task to be started Returns ------- time : float The time lapsed between task start and completion """ try: runtime = self.timer() - self.tasks[task] del self.tasks[task] if runtime >= self.min_runtime: self.info("Calculated {} in {:.2f} seconds.".format( task, runtime)) return runtime except KeyError: self.info("Calculated {}.".format(task))
[ "def", "complete_task", "(", "self", ",", "task", ")", ":", "try", ":", "runtime", "=", "self", ".", "timer", "(", ")", "-", "self", ".", "tasks", "[", "task", "]", "del", "self", ".", "tasks", "[", "task", "]", "if", "runtime", ">=", "self", ".", "min_runtime", ":", "self", ".", "info", "(", "\"Calculated {} in {:.2f} seconds.\"", ".", "format", "(", "task", ",", "runtime", ")", ")", "return", "runtime", "except", "KeyError", ":", "self", ".", "info", "(", "\"Calculated {}.\"", ".", "format", "(", "task", ")", ")" ]
Complete logging of a task Returns the time elapsed since `start_task` was called Parameters ---------- task : str Name of the task to be completed Returns ------- time : float The time elapsed between task start and completion
[ "Complete", "logging", "of", "a", "task" ]
06a263715d2db0653615c17b2df14b8272967b8d
https://github.com/scottgigante/tasklogger/blob/06a263715d2db0653615c17b2df14b8272967b8d/tasklogger/logger.py#L220-L243
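A hedged usage sketch for the record above, assuming TaskLogger can be constructed with its defaults and pairing complete_task with the start_task call referenced in the docstring.

import tasklogger

logger = tasklogger.TaskLogger()
logger.start_task('PCA')
# ... run the computation ...
elapsed = logger.complete_task('PCA')  # seconds, or None if the task was never started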
243,977
etcher-be/emiz
emiz/edit_miz.py
edit_miz
def edit_miz( # noqa: C901 infile: str, outfile: str = None, metar: typing.Union[str, Metar] = None, time: str = None, min_wind: int = 0, max_wind: int = 40 ) -> str: # noinspection SpellCheckingInspection """ Edit an opened MIZ file and sets the time and date and the weather Args: infile: source file outfile: output file (will default to source file) metar: metar string, ICAO or object to apply time: time string to apply (YYYYMMDDHHMMSS) min_wind: minimum wind max_wind: maximum wind Returns: String containing error """ if outfile is None: LOGGER.debug('editing in place: %s', infile) outfile = infile else: LOGGER.debug('editing miz file: %s -> %s', infile, outfile) mission_weather = mission_time = None if metar: error, metar = emiz.weather.custom_metar.CustomMetar.get_metar(metar) if error: return error mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind) if time: try: mission_time = MissionTime.from_string(time) except ValueError: return f'badly formatted time string: {time}' if not mission_weather and not mission_time: return 'nothing to do!' with Miz(infile) as miz: if mission_weather: LOGGER.debug('applying MissionWeather') if not mission_weather.apply_to_miz(miz): return 'error while applying METAR to mission' if mission_time: LOGGER.debug('applying MissionTime') if not mission_time.apply_to_miz(miz): return 'error while setting time on mission' try: miz.zip(outfile) return '' except OSError: return f'permission error: cannot edit "{outfile}"; maybe it is in use ?'
python
def edit_miz( # noqa: C901 infile: str, outfile: str = None, metar: typing.Union[str, Metar] = None, time: str = None, min_wind: int = 0, max_wind: int = 40 ) -> str: # noinspection SpellCheckingInspection """ Edit an opened MIZ file and sets the time and date and the weather Args: infile: source file outfile: output file (will default to source file) metar: metar string, ICAO or object to apply time: time string to apply (YYYYMMDDHHMMSS) min_wind: minimum wind max_wind: maximum wind Returns: String containing error """ if outfile is None: LOGGER.debug('editing in place: %s', infile) outfile = infile else: LOGGER.debug('editing miz file: %s -> %s', infile, outfile) mission_weather = mission_time = None if metar: error, metar = emiz.weather.custom_metar.CustomMetar.get_metar(metar) if error: return error mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind) if time: try: mission_time = MissionTime.from_string(time) except ValueError: return f'badly formatted time string: {time}' if not mission_weather and not mission_time: return 'nothing to do!' with Miz(infile) as miz: if mission_weather: LOGGER.debug('applying MissionWeather') if not mission_weather.apply_to_miz(miz): return 'error while applying METAR to mission' if mission_time: LOGGER.debug('applying MissionTime') if not mission_time.apply_to_miz(miz): return 'error while setting time on mission' try: miz.zip(outfile) return '' except OSError: return f'permission error: cannot edit "{outfile}"; maybe it is in use ?'
[ "def", "edit_miz", "(", "# noqa: C901", "infile", ":", "str", ",", "outfile", ":", "str", "=", "None", ",", "metar", ":", "typing", ".", "Union", "[", "str", ",", "Metar", "]", "=", "None", ",", "time", ":", "str", "=", "None", ",", "min_wind", ":", "int", "=", "0", ",", "max_wind", ":", "int", "=", "40", ")", "->", "str", ":", "# noinspection SpellCheckingInspection", "if", "outfile", "is", "None", ":", "LOGGER", ".", "debug", "(", "'editing in place: %s'", ",", "infile", ")", "outfile", "=", "infile", "else", ":", "LOGGER", ".", "debug", "(", "'editing miz file: %s -> %s'", ",", "infile", ",", "outfile", ")", "mission_weather", "=", "mission_time", "=", "None", "if", "metar", ":", "error", ",", "metar", "=", "emiz", ".", "weather", ".", "custom_metar", ".", "CustomMetar", ".", "get_metar", "(", "metar", ")", "if", "error", ":", "return", "error", "mission_weather", "=", "emiz", ".", "weather", ".", "mission_weather", ".", "MissionWeather", "(", "metar", ",", "min_wind", "=", "min_wind", ",", "max_wind", "=", "max_wind", ")", "if", "time", ":", "try", ":", "mission_time", "=", "MissionTime", ".", "from_string", "(", "time", ")", "except", "ValueError", ":", "return", "f'badly formatted time string: {time}'", "if", "not", "mission_weather", "and", "not", "mission_time", ":", "return", "'nothing to do!'", "with", "Miz", "(", "infile", ")", "as", "miz", ":", "if", "mission_weather", ":", "LOGGER", ".", "debug", "(", "'applying MissionWeather'", ")", "if", "not", "mission_weather", ".", "apply_to_miz", "(", "miz", ")", ":", "return", "'error while applying METAR to mission'", "if", "mission_time", ":", "LOGGER", ".", "debug", "(", "'applying MissionTime'", ")", "if", "not", "mission_time", ".", "apply_to_miz", "(", "miz", ")", ":", "return", "'error while setting time on mission'", "try", ":", "miz", ".", "zip", "(", "outfile", ")", "return", "''", "except", "OSError", ":", "return", "f'permission error: cannot edit \"{outfile}\"; maybe it is in use ?'" ]
Edit an opened MIZ file and set the time, date and weather Args: infile: source file outfile: output file (will default to source file) metar: metar string, ICAO or object to apply time: time string to apply (YYYYMMDDHHMMSS) min_wind: minimum wind max_wind: maximum wind Returns: String containing the error (empty on success)
[ "Edit", "an", "opened", "MIZ", "file", "and", "set", "the", "time", ",", "date", "and", "weather" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/edit_miz.py#L19-L80
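Usage sketch for edit_miz; the file path and METAR string are made up, and the time string follows the YYYYMMDDHHMMSS format from the docstring. An empty return value means success.

error = edit_miz(
    infile='missions/example_mission.miz',
    metar='UGTB 221300Z 13014KT CAVOK 15/07 Q1020',
    time='20180422133000',
)
if error:
    print('edit failed:', error)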
243,978
ikalnytskyi/dooku
dooku/conf.py
Conf.from_file
def from_file(self, loader, filename, encoding='utf-8', silent=False): """ Updates recursively the value in the the config from some file. :param loader: (function) a function that receives a file object and returns a dictionary to be merged into settings :param filename: (str) a filename to be opened :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something wrong with json file .. versionadded:: 0.4.0 """ conf = {} try: with open(filename, encoding=encoding) as f: conf = loader(f) except Exception: if not silent: raise self.update(conf)
python
def from_file(self, loader, filename, encoding='utf-8', silent=False): """ Updates recursively the value in the the config from some file. :param loader: (function) a function that receives a file object and returns a dictionary to be merged into settings :param filename: (str) a filename to be opened :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something wrong with json file .. versionadded:: 0.4.0 """ conf = {} try: with open(filename, encoding=encoding) as f: conf = loader(f) except Exception: if not silent: raise self.update(conf)
[ "def", "from_file", "(", "self", ",", "loader", ",", "filename", ",", "encoding", "=", "'utf-8'", ",", "silent", "=", "False", ")", ":", "conf", "=", "{", "}", "try", ":", "with", "open", "(", "filename", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "conf", "=", "loader", "(", "f", ")", "except", "Exception", ":", "if", "not", "silent", ":", "raise", "self", ".", "update", "(", "conf", ")" ]
Updates recursively the value in the config from some file. :param loader: (function) a function that receives a file object and returns a dictionary to be merged into settings :param filename: (str) a filename to be opened :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something is wrong with the file .. versionadded:: 0.4.0
[ "Updates", "recursively", "the", "value", "in", "the", "config", "from", "some", "file", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/conf.py#L117-L136
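A minimal sketch of from_file, assuming Conf can be built from a plain dict; any loader that maps an open file object to a dict works, json.load being the obvious choice.

import json

conf = Conf({'server': {'host': 'localhost', 'port': 8000}})  # constructor assumed
conf.from_file(json.load, 'settings.json', silent=True)       # merge, ignore a broken/missing file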
243,979
ikalnytskyi/dooku
dooku/conf.py
Conf.from_json
def from_json(self, filename, encoding='utf-8', silent=False): """ Updates recursively the value in the the config from a JSON file. :param filename: (str) a filename of the JSON file :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something wrong with json file .. versionadded:: 0.3.0 """ self.from_file(json.load, filename, encoding, silent)
python
def from_json(self, filename, encoding='utf-8', silent=False): """ Updates recursively the value in the the config from a JSON file. :param filename: (str) a filename of the JSON file :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something wrong with json file .. versionadded:: 0.3.0 """ self.from_file(json.load, filename, encoding, silent)
[ "def", "from_json", "(", "self", ",", "filename", ",", "encoding", "=", "'utf-8'", ",", "silent", "=", "False", ")", ":", "self", ".", "from_file", "(", "json", ".", "load", ",", "filename", ",", "encoding", ",", "silent", ")" ]
Updates recursively the value in the config from a JSON file. :param filename: (str) a filename of the JSON file :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something is wrong with the JSON file .. versionadded:: 0.3.0
[ "Updates", "recursively", "the", "value", "in", "the", "config", "from", "a", "JSON", "file", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/conf.py#L138-L148
243,980
ikalnytskyi/dooku
dooku/conf.py
Conf.from_yaml
def from_yaml(self, filename, encoding='utf-8', silent=False): """ Updates recursively the value in the the config from a YAML file. The method requires the PyYAML to be installed. :param filename: (str) a filename of the YAML file :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something wrong with yaml file .. versionadded:: 0.3.0 """ if not yaml: raise AttributeError( 'You need to install PyYAML before using this method!') self.from_file(yaml.load, filename, encoding, silent)
python
def from_yaml(self, filename, encoding='utf-8', silent=False): """ Updates recursively the value in the the config from a YAML file. The method requires the PyYAML to be installed. :param filename: (str) a filename of the YAML file :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something wrong with yaml file .. versionadded:: 0.3.0 """ if not yaml: raise AttributeError( 'You need to install PyYAML before using this method!') self.from_file(yaml.load, filename, encoding, silent)
[ "def", "from_yaml", "(", "self", ",", "filename", ",", "encoding", "=", "'utf-8'", ",", "silent", "=", "False", ")", ":", "if", "not", "yaml", ":", "raise", "AttributeError", "(", "'You need to install PyYAML before using this method!'", ")", "self", ".", "from_file", "(", "yaml", ".", "load", ",", "filename", ",", "encoding", ",", "silent", ")" ]
Updates recursively the value in the config from a YAML file. The method requires PyYAML to be installed. :param filename: (str) a filename of the YAML file :param encoding: (str) an encoding of the filename :param silent: (bool) fails silently if something is wrong with the YAML file .. versionadded:: 0.3.0
[ "Updates", "recursively", "the", "value", "in", "the", "config", "from", "a", "YAML", "file", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/conf.py#L150-L166
243,981
ikalnytskyi/dooku
dooku/conf.py
Conf.update
def update(self, iterable={}, **kwargs): """ Updates recursively a self with a given iterable. TODO: rewrite this ugly stuff """ def _merge(a, *args): for key, value in itertools.chain(*args): if key in a and isinstance(value, (dict, Conf)): value = _merge(a[key], value.items()) a[key] = value return a # adopt iterable sequence to unified interface: (key, value) if isinstance(iterable, (dict, Conf)): iterable = iterable.items() # iterate and update values _merge(self._data, iterable, kwargs.items())
python
def update(self, iterable={}, **kwargs): """ Updates recursively a self with a given iterable. TODO: rewrite this ugly stuff """ def _merge(a, *args): for key, value in itertools.chain(*args): if key in a and isinstance(value, (dict, Conf)): value = _merge(a[key], value.items()) a[key] = value return a # adopt iterable sequence to unified interface: (key, value) if isinstance(iterable, (dict, Conf)): iterable = iterable.items() # iterate and update values _merge(self._data, iterable, kwargs.items())
[ "def", "update", "(", "self", ",", "iterable", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "def", "_merge", "(", "a", ",", "*", "args", ")", ":", "for", "key", ",", "value", "in", "itertools", ".", "chain", "(", "*", "args", ")", ":", "if", "key", "in", "a", "and", "isinstance", "(", "value", ",", "(", "dict", ",", "Conf", ")", ")", ":", "value", "=", "_merge", "(", "a", "[", "key", "]", ",", "value", ".", "items", "(", ")", ")", "a", "[", "key", "]", "=", "value", "return", "a", "# adopt iterable sequence to unified interface: (key, value)", "if", "isinstance", "(", "iterable", ",", "(", "dict", ",", "Conf", ")", ")", ":", "iterable", "=", "iterable", ".", "items", "(", ")", "# iterate and update values", "_merge", "(", "self", ".", "_data", ",", "iterable", ",", "kwargs", ".", "items", "(", ")", ")" ]
Updates recursively a self with a given iterable. TODO: rewrite this ugly stuff
[ "Updates", "recursively", "a", "self", "with", "a", "given", "iterable", "." ]
77e6c82c9c41211c86ee36ae5e591d477945fedf
https://github.com/ikalnytskyi/dooku/blob/77e6c82c9c41211c86ee36ae5e591d477945fedf/dooku/conf.py#L168-L186
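A short sketch of the recursive merge that update performs, again assuming a dict-style constructor and item access on Conf.

conf = Conf({'db': {'host': 'localhost', 'port': 5432}})
conf.update({'db': {'port': 6543}, 'debug': True})
# nested mappings are merged rather than replaced:
# conf['db'] -> {'host': 'localhost', 'port': 6543}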
243,982
lambdalisue/maidenhair
src/maidenhair/filters/baseline.py
baseline
def baseline(dataset, column=1, fn=None, fail_silently=True): """ Substract baseline from the dataset Parameters ---------- dataset : list of numpy array list A list of numpy array list column : integer An index of column which will be proceeded fn : function A function which require data and return baseline. If it is `None`, the first value of data will be used for subtracting fail_silently : boolean If `True`, do not raise exception if no data exists Returns ------- ndarray A list of numpy array list Examples -------- >>> import numpy as np >>> from maidenhair.filters.baseline import baseline >>> dataset = [] >>> dataset.append([np.array([0, 1, 2]), np.array([3, 4, 5])]) >>> dataset.append([np.array([0, 1, 2]), np.array([3, 5, 7])]) >>> dataset.append([np.array([0, 1, 2]), np.array([100, 103, 106])]) >>> expected = [ ... [np.array([0, 1, 2]), np.array([0, 1, 2])], ... [np.array([0, 1, 2]), np.array([0, 2, 4])], ... [np.array([0, 1, 2]), np.array([0, 3, 6])], ... ] >>> proceed = baseline(dataset) >>> np.array_equal(proceed, expected) True """ try: if fn is None: fn = lambda columns, column: columns[column][0] for i, data in enumerate(dataset): _baseline = fn(data, column=column) dataset[i][column] -= _baseline return dataset except IndexError, e: if fail_silently: # fail silently return dataset raise e
python
def baseline(dataset, column=1, fn=None, fail_silently=True): """ Substract baseline from the dataset Parameters ---------- dataset : list of numpy array list A list of numpy array list column : integer An index of column which will be proceeded fn : function A function which require data and return baseline. If it is `None`, the first value of data will be used for subtracting fail_silently : boolean If `True`, do not raise exception if no data exists Returns ------- ndarray A list of numpy array list Examples -------- >>> import numpy as np >>> from maidenhair.filters.baseline import baseline >>> dataset = [] >>> dataset.append([np.array([0, 1, 2]), np.array([3, 4, 5])]) >>> dataset.append([np.array([0, 1, 2]), np.array([3, 5, 7])]) >>> dataset.append([np.array([0, 1, 2]), np.array([100, 103, 106])]) >>> expected = [ ... [np.array([0, 1, 2]), np.array([0, 1, 2])], ... [np.array([0, 1, 2]), np.array([0, 2, 4])], ... [np.array([0, 1, 2]), np.array([0, 3, 6])], ... ] >>> proceed = baseline(dataset) >>> np.array_equal(proceed, expected) True """ try: if fn is None: fn = lambda columns, column: columns[column][0] for i, data in enumerate(dataset): _baseline = fn(data, column=column) dataset[i][column] -= _baseline return dataset except IndexError, e: if fail_silently: # fail silently return dataset raise e
[ "def", "baseline", "(", "dataset", ",", "column", "=", "1", ",", "fn", "=", "None", ",", "fail_silently", "=", "True", ")", ":", "try", ":", "if", "fn", "is", "None", ":", "fn", "=", "lambda", "columns", ",", "column", ":", "columns", "[", "column", "]", "[", "0", "]", "for", "i", ",", "data", "in", "enumerate", "(", "dataset", ")", ":", "_baseline", "=", "fn", "(", "data", ",", "column", "=", "column", ")", "dataset", "[", "i", "]", "[", "column", "]", "-=", "_baseline", "return", "dataset", "except", "IndexError", ",", "e", ":", "if", "fail_silently", ":", "# fail silently", "return", "dataset", "raise", "e" ]
Subtract the baseline from the dataset Parameters ---------- dataset : list of numpy array list A list of numpy array list column : integer An index of the column which will be processed fn : function A function which takes data and returns a baseline. If it is `None`, the first value of the data will be used for subtraction fail_silently : boolean If `True`, do not raise an exception if no data exists Returns ------- ndarray A list of numpy array list Examples -------- >>> import numpy as np >>> from maidenhair.filters.baseline import baseline >>> dataset = [] >>> dataset.append([np.array([0, 1, 2]), np.array([3, 4, 5])]) >>> dataset.append([np.array([0, 1, 2]), np.array([3, 5, 7])]) >>> dataset.append([np.array([0, 1, 2]), np.array([100, 103, 106])]) >>> expected = [ ... [np.array([0, 1, 2]), np.array([0, 1, 2])], ... [np.array([0, 1, 2]), np.array([0, 2, 4])], ... [np.array([0, 1, 2]), np.array([0, 3, 6])], ... ] >>> proceed = baseline(dataset) >>> np.array_equal(proceed, expected) True
[ "Subtract", "the", "baseline", "from", "the", "dataset" ]
d5095c1087d1f4d71cc57410492151d2803a9f0d
https://github.com/lambdalisue/maidenhair/blob/d5095c1087d1f4d71cc57410492151d2803a9f0d/src/maidenhair/filters/baseline.py#L10-L61
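Beyond the doctest already in the record, the fn hook can supply a different baseline; a hedged sketch follows (import path taken from the record, fn contract is fn(data, column=...) -> scalar; note the record's except clause is Python 2 syntax, so importing the module requires Python 2).

import numpy as np
from maidenhair.filters.baseline import baseline

dataset = [[np.array([0, 1, 2]), np.array([10, 12, 15])]]
# Hypothetical custom baseline: subtract the column minimum instead of its first value.
shifted = baseline(dataset, column=1, fn=lambda columns, column: columns[column].min())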
243,983
jic-dtool/dtool-info
dtool_info/inventory.py
_dataset_info
def _dataset_info(dataset): """Return information about dataset as a dict.""" info = {} info["uri"] = dataset.uri info["uuid"] = dataset.uuid # Computer and human readable size of dataset. tot_size = sum([dataset.item_properties(i)["size_in_bytes"] for i in dataset.identifiers]) info["size_int"] = tot_size info["size_str"] = sizeof_fmt(tot_size) info["creator"] = dataset._admin_metadata["creator_username"] info["name"] = dataset._admin_metadata["name"] info["date"] = date_fmt(dataset._admin_metadata["frozen_at"]) info["num_items"] = len(dataset.identifiers) info["readme_content"] = dataset.get_readme_content() return info
python
def _dataset_info(dataset): """Return information about dataset as a dict.""" info = {} info["uri"] = dataset.uri info["uuid"] = dataset.uuid # Computer and human readable size of dataset. tot_size = sum([dataset.item_properties(i)["size_in_bytes"] for i in dataset.identifiers]) info["size_int"] = tot_size info["size_str"] = sizeof_fmt(tot_size) info["creator"] = dataset._admin_metadata["creator_username"] info["name"] = dataset._admin_metadata["name"] info["date"] = date_fmt(dataset._admin_metadata["frozen_at"]) info["num_items"] = len(dataset.identifiers) info["readme_content"] = dataset.get_readme_content() return info
[ "def", "_dataset_info", "(", "dataset", ")", ":", "info", "=", "{", "}", "info", "[", "\"uri\"", "]", "=", "dataset", ".", "uri", "info", "[", "\"uuid\"", "]", "=", "dataset", ".", "uuid", "# Computer and human readable size of dataset.", "tot_size", "=", "sum", "(", "[", "dataset", ".", "item_properties", "(", "i", ")", "[", "\"size_in_bytes\"", "]", "for", "i", "in", "dataset", ".", "identifiers", "]", ")", "info", "[", "\"size_int\"", "]", "=", "tot_size", "info", "[", "\"size_str\"", "]", "=", "sizeof_fmt", "(", "tot_size", ")", "info", "[", "\"creator\"", "]", "=", "dataset", ".", "_admin_metadata", "[", "\"creator_username\"", "]", "info", "[", "\"name\"", "]", "=", "dataset", ".", "_admin_metadata", "[", "\"name\"", "]", "info", "[", "\"date\"", "]", "=", "date_fmt", "(", "dataset", ".", "_admin_metadata", "[", "\"frozen_at\"", "]", ")", "info", "[", "\"num_items\"", "]", "=", "len", "(", "dataset", ".", "identifiers", ")", "info", "[", "\"readme_content\"", "]", "=", "dataset", ".", "get_readme_content", "(", ")", "return", "info" ]
Return information about dataset as a dict.
[ "Return", "information", "about", "dataset", "as", "a", "dict", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/inventory.py#L18-L40
243,984
jic-dtool/dtool-info
dtool_info/inventory.py
inventory
def inventory(uri, format): """Generate an inventory of datasets in a base URI.""" base_uri = dtoolcore.utils.sanitise_uri(uri) info = _base_uri_info(base_uri) if format is None: _cmd_line_report(info) elif format == "csv": _csv_tsv_report(info, ",") elif format == "tsv": _csv_tsv_report(info, "\t") elif format == "html": _html_report(info)
python
def inventory(uri, format): """Generate an inventory of datasets in a base URI.""" base_uri = dtoolcore.utils.sanitise_uri(uri) info = _base_uri_info(base_uri) if format is None: _cmd_line_report(info) elif format == "csv": _csv_tsv_report(info, ",") elif format == "tsv": _csv_tsv_report(info, "\t") elif format == "html": _html_report(info)
[ "def", "inventory", "(", "uri", ",", "format", ")", ":", "base_uri", "=", "dtoolcore", ".", "utils", ".", "sanitise_uri", "(", "uri", ")", "info", "=", "_base_uri_info", "(", "base_uri", ")", "if", "format", "is", "None", ":", "_cmd_line_report", "(", "info", ")", "elif", "format", "==", "\"csv\"", ":", "_csv_tsv_report", "(", "info", ",", "\",\"", ")", "elif", "format", "==", "\"tsv\"", ":", "_csv_tsv_report", "(", "info", ",", "\"\\t\"", ")", "elif", "format", "==", "\"html\"", ":", "_html_report", "(", "info", ")" ]
Generate an inventory of datasets in a base URI.
[ "Generate", "an", "inventory", "of", "datasets", "in", "a", "base", "URI", "." ]
3c6c7755f4c142e548bbfdf3b38230612fd4060a
https://github.com/jic-dtool/dtool-info/blob/3c6c7755f4c142e548bbfdf3b38230612fd4060a/dtool_info/inventory.py#L155-L167
243,985
delfick/aws_syncr
aws_syncr/filename_completer.py
custom_prompt
def custom_prompt(msg, delims="", completer=lambda: None): """Start up a prompt that with particular delims and completer""" try: orig_delims = readline.get_completer_delims() orig_completer = readline.get_completer() readline.set_completer_delims(delims) readline.set_completer(completer) try: ret = input(msg) finally: readline.set_completer_delims(orig_delims) readline.set_completer(orig_completer) return ret except EOFError: raise UserQuit()
python
def custom_prompt(msg, delims="", completer=lambda: None): """Start up a prompt that with particular delims and completer""" try: orig_delims = readline.get_completer_delims() orig_completer = readline.get_completer() readline.set_completer_delims(delims) readline.set_completer(completer) try: ret = input(msg) finally: readline.set_completer_delims(orig_delims) readline.set_completer(orig_completer) return ret except EOFError: raise UserQuit()
[ "def", "custom_prompt", "(", "msg", ",", "delims", "=", "\"\"", ",", "completer", "=", "lambda", ":", "None", ")", ":", "try", ":", "orig_delims", "=", "readline", ".", "get_completer_delims", "(", ")", "orig_completer", "=", "readline", ".", "get_completer", "(", ")", "readline", ".", "set_completer_delims", "(", "delims", ")", "readline", ".", "set_completer", "(", "completer", ")", "try", ":", "ret", "=", "input", "(", "msg", ")", "finally", ":", "readline", ".", "set_completer_delims", "(", "orig_delims", ")", "readline", ".", "set_completer", "(", "orig_completer", ")", "return", "ret", "except", "EOFError", ":", "raise", "UserQuit", "(", ")" ]
Start up a prompt with particular delims and completer
[ "Start", "up", "a", "prompt", "with", "particular", "delims", "and", "completer" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/filename_completer.py#L46-L63
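A sketch of a completer in the (text, state) form that readline expects, passed through custom_prompt; the completion targets are made up.

CHOICES = ['staging', 'production', 'development']

def complete_env(text, state):
    matches = [c for c in CHOICES if c.startswith(text)]
    return matches[state] if state < len(matches) else None

answer = custom_prompt('Environment: ', delims=' ', completer=complete_env)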
243,986
mbodenhamer/syn
syn/types/a/base.py
primitive_form
def primitive_form(obj, **kwargs): '''Return obj, if possible, in a form composed of primitive or builtin objects.''' if isinstance(obj, type): return obj return Type.dispatch(obj).primitive_form(**kwargs)
python
def primitive_form(obj, **kwargs): '''Return obj, if possible, in a form composed of primitive or builtin objects.''' if isinstance(obj, type): return obj return Type.dispatch(obj).primitive_form(**kwargs)
[ "def", "primitive_form", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "obj", ",", "type", ")", ":", "return", "obj", "return", "Type", ".", "dispatch", "(", "obj", ")", ".", "primitive_form", "(", "*", "*", "kwargs", ")" ]
Return obj, if possible, in a form composed of primitive or builtin objects.
[ "Return", "obj", "if", "possible", "in", "a", "form", "composed", "of", "primitive", "or", "builtin", "objects", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/types/a/base.py#L406-L410
243,987
lwcook/horsetail-matching
horsetailmatching/parameters.py
UncertainParameter.evalPDF
def evalPDF(self, u_values): '''Returns the PDF of the uncertain parameter evaluated at the values provided in u_values. :param iterable u_values: values of the uncertain parameter at which to evaluate the PDF *Example Usage* :: >>> u = UniformParameter() >>> X = numpy.linspace(-1, 1, 100) >>> Y = [u.evalPDF(x) for x in X] ''' if isinstance(u_values, np.ndarray): return self._evalPDF(u_values) else: try: iter(u_values) return [self._evalPDF(u) for u in u_values] except: return self._evalPDF(u_values)
python
def evalPDF(self, u_values): '''Returns the PDF of the uncertain parameter evaluated at the values provided in u_values. :param iterable u_values: values of the uncertain parameter at which to evaluate the PDF *Example Usage* :: >>> u = UniformParameter() >>> X = numpy.linspace(-1, 1, 100) >>> Y = [u.evalPDF(x) for x in X] ''' if isinstance(u_values, np.ndarray): return self._evalPDF(u_values) else: try: iter(u_values) return [self._evalPDF(u) for u in u_values] except: return self._evalPDF(u_values)
[ "def", "evalPDF", "(", "self", ",", "u_values", ")", ":", "if", "isinstance", "(", "u_values", ",", "np", ".", "ndarray", ")", ":", "return", "self", ".", "_evalPDF", "(", "u_values", ")", "else", ":", "try", ":", "iter", "(", "u_values", ")", "return", "[", "self", ".", "_evalPDF", "(", "u", ")", "for", "u", "in", "u_values", "]", "except", ":", "return", "self", ".", "_evalPDF", "(", "u_values", ")" ]
Returns the PDF of the uncertain parameter evaluated at the values provided in u_values. :param iterable u_values: values of the uncertain parameter at which to evaluate the PDF *Example Usage* :: >>> u = UniformParameter() >>> X = numpy.linspace(-1, 1, 100) >>> Y = [u.evalPDF(x) for x in X]
[ "Returns", "the", "PDF", "of", "the", "uncertain", "parameter", "evaluated", "at", "the", "values", "provided", "in", "u_values", "." ]
f3d5f8d01249debbca978f412ce4eae017458119
https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/parameters.py#L91-L113
243,988
shaunduncan/helga-facts
helga_facts.py
term_regex
def term_regex(term): """ Returns a case-insensitive regex for searching terms """ return re.compile(r'^{0}$'.format(re.escape(term)), re.IGNORECASE)
python
def term_regex(term): """ Returns a case-insensitive regex for searching terms """ return re.compile(r'^{0}$'.format(re.escape(term)), re.IGNORECASE)
[ "def", "term_regex", "(", "term", ")", ":", "return", "re", ".", "compile", "(", "r'^{0}$'", ".", "format", "(", "re", ".", "escape", "(", "term", ")", ")", ",", "re", ".", "IGNORECASE", ")" ]
Returns a case-insensitive regex for searching terms
[ "Returns", "a", "case", "-", "insensitive", "regex", "for", "searching", "terms" ]
956b1d93abccdaaf318d7cac4451edc7e73bf5e9
https://github.com/shaunduncan/helga-facts/blob/956b1d93abccdaaf318d7cac4451edc7e73bf5e9/helga_facts.py#L23-L27
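A quick check of what term_regex guarantees: the whole term must match, case-insensitively, with regex metacharacters escaped.

pattern = term_regex('C++ (the language)')
assert pattern.match('c++ (the language)')            # case-insensitive full match
assert pattern.match('c++ (the language)!') is None   # anchored at both ends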
243,989
shaunduncan/helga-facts
helga_facts.py
show_fact
def show_fact(term): """ Shows a fact stored for a given term, using a case-insensitive search. If a fact has an author, it will be shown. If it has a timestamp, that will also be shown. """ logger.info('Showing fact %s', term) record = db.facts.find_one({'term': term_regex(term)}) if record is None: return None # Fix double spacing in older facts if record['fact']: record['fact'] = record['fact'].replace(' ', ' ') # If it isn't authored if not record.get('set_by', ''): return record['fact'] if 'set_date' not in record: return '{fact} ({set_by})'.format(**record) # Otherwise, do normal formatting tz = getattr(settings, 'TIMEZONE', 'US/Eastern') try: timestamp = datetime.fromtimestamp(record['set_date'], tz=pytz.timezone(tz)) except TypeError: timestamp = record['set_date'].replace(tzinfo=pytz.timezone(tz)) record['fmt_dt'] = datetime.strftime(timestamp, '%m/%d/%Y %I:%M%p') return '{fact} ({set_by} on {fmt_dt})'.format(**record)
python
def show_fact(term): """ Shows a fact stored for a given term, using a case-insensitive search. If a fact has an author, it will be shown. If it has a timestamp, that will also be shown. """ logger.info('Showing fact %s', term) record = db.facts.find_one({'term': term_regex(term)}) if record is None: return None # Fix double spacing in older facts if record['fact']: record['fact'] = record['fact'].replace(' ', ' ') # If it isn't authored if not record.get('set_by', ''): return record['fact'] if 'set_date' not in record: return '{fact} ({set_by})'.format(**record) # Otherwise, do normal formatting tz = getattr(settings, 'TIMEZONE', 'US/Eastern') try: timestamp = datetime.fromtimestamp(record['set_date'], tz=pytz.timezone(tz)) except TypeError: timestamp = record['set_date'].replace(tzinfo=pytz.timezone(tz)) record['fmt_dt'] = datetime.strftime(timestamp, '%m/%d/%Y %I:%M%p') return '{fact} ({set_by} on {fmt_dt})'.format(**record)
[ "def", "show_fact", "(", "term", ")", ":", "logger", ".", "info", "(", "'Showing fact %s'", ",", "term", ")", "record", "=", "db", ".", "facts", ".", "find_one", "(", "{", "'term'", ":", "term_regex", "(", "term", ")", "}", ")", "if", "record", "is", "None", ":", "return", "None", "# Fix double spacing in older facts", "if", "record", "[", "'fact'", "]", ":", "record", "[", "'fact'", "]", "=", "record", "[", "'fact'", "]", ".", "replace", "(", "' '", ",", "' '", ")", "# If it isn't authored", "if", "not", "record", ".", "get", "(", "'set_by'", ",", "''", ")", ":", "return", "record", "[", "'fact'", "]", "if", "'set_date'", "not", "in", "record", ":", "return", "'{fact} ({set_by})'", ".", "format", "(", "*", "*", "record", ")", "# Otherwise, do normal formatting", "tz", "=", "getattr", "(", "settings", ",", "'TIMEZONE'", ",", "'US/Eastern'", ")", "try", ":", "timestamp", "=", "datetime", ".", "fromtimestamp", "(", "record", "[", "'set_date'", "]", ",", "tz", "=", "pytz", ".", "timezone", "(", "tz", ")", ")", "except", "TypeError", ":", "timestamp", "=", "record", "[", "'set_date'", "]", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "timezone", "(", "tz", ")", ")", "record", "[", "'fmt_dt'", "]", "=", "datetime", ".", "strftime", "(", "timestamp", ",", "'%m/%d/%Y %I:%M%p'", ")", "return", "'{fact} ({set_by} on {fmt_dt})'", ".", "format", "(", "*", "*", "record", ")" ]
Shows a fact stored for a given term, using a case-insensitive search. If a fact has an author, it will be shown. If it has a timestamp, that will also be shown.
[ "Shows", "a", "fact", "stored", "for", "a", "given", "term", "using", "a", "case", "-", "insensitive", "search", ".", "If", "a", "fact", "has", "an", "author", "it", "will", "be", "shown", ".", "If", "it", "has", "a", "timestamp", "that", "will", "also", "be", "shown", "." ]
956b1d93abccdaaf318d7cac4451edc7e73bf5e9
https://github.com/shaunduncan/helga-facts/blob/956b1d93abccdaaf318d7cac4451edc7e73bf5e9/helga_facts.py#L30-L61
243,990
shaunduncan/helga-facts
helga_facts.py
add_fact
def add_fact(term, fact, author=''): """ Records a new fact with a given term. Optionally can set an author """ logger.info('Adding new fact %s: %s', term, fact) if not db.facts.find({'term': term_regex(term)}).count(): db.facts.insert({ 'term': term, 'fact': fact, 'set_by': author, 'set_date': time.time() }) db.facts.ensure_index('term')
python
def add_fact(term, fact, author=''): """ Records a new fact with a given term. Optionally can set an author """ logger.info('Adding new fact %s: %s', term, fact) if not db.facts.find({'term': term_regex(term)}).count(): db.facts.insert({ 'term': term, 'fact': fact, 'set_by': author, 'set_date': time.time() }) db.facts.ensure_index('term')
[ "def", "add_fact", "(", "term", ",", "fact", ",", "author", "=", "''", ")", ":", "logger", ".", "info", "(", "'Adding new fact %s: %s'", ",", "term", ",", "fact", ")", "if", "not", "db", ".", "facts", ".", "find", "(", "{", "'term'", ":", "term_regex", "(", "term", ")", "}", ")", ".", "count", "(", ")", ":", "db", ".", "facts", ".", "insert", "(", "{", "'term'", ":", "term", ",", "'fact'", ":", "fact", ",", "'set_by'", ":", "author", ",", "'set_date'", ":", "time", ".", "time", "(", ")", "}", ")", "db", ".", "facts", ".", "ensure_index", "(", "'term'", ")" ]
Records a new fact with a given term. Optionally can set an author
[ "Records", "a", "new", "fact", "with", "a", "given", "term", ".", "Optionally", "can", "set", "an", "author" ]
956b1d93abccdaaf318d7cac4451edc7e73bf5e9
https://github.com/shaunduncan/helga-facts/blob/956b1d93abccdaaf318d7cac4451edc7e73bf5e9/helga_facts.py#L64-L77
243,991
shaunduncan/helga-facts
helga_facts.py
forget_fact
def forget_fact(term): """ Forgets a fact by removing it from the database """ logger.info('Removing fact %s', term) db.facts.remove({'term': term_regex(term)}) return random.choice(ACKS)
python
def forget_fact(term): """ Forgets a fact by removing it from the database """ logger.info('Removing fact %s', term) db.facts.remove({'term': term_regex(term)}) return random.choice(ACKS)
[ "def", "forget_fact", "(", "term", ")", ":", "logger", ".", "info", "(", "'Removing fact %s'", ",", "term", ")", "db", ".", "facts", ".", "remove", "(", "{", "'term'", ":", "term_regex", "(", "term", ")", "}", ")", "return", "random", ".", "choice", "(", "ACKS", ")" ]
Forgets a fact by removing it from the database
[ "Forgets", "a", "fact", "by", "removing", "it", "from", "the", "database" ]
956b1d93abccdaaf318d7cac4451edc7e73bf5e9
https://github.com/shaunduncan/helga-facts/blob/956b1d93abccdaaf318d7cac4451edc7e73bf5e9/helga_facts.py#L80-L86
243,992
shaunduncan/helga-facts
helga_facts.py
replace_fact
def replace_fact(term, fact, author=''): """ Replaces an existing fact by removing it, then adding the new definition """ forget_fact(term) add_fact(term, fact, author) return random.choice(ACKS)
python
def replace_fact(term, fact, author=''): """ Replaces an existing fact by removing it, then adding the new definition """ forget_fact(term) add_fact(term, fact, author) return random.choice(ACKS)
[ "def", "replace_fact", "(", "term", ",", "fact", ",", "author", "=", "''", ")", ":", "forget_fact", "(", "term", ")", "add_fact", "(", "term", ",", "fact", ",", "author", ")", "return", "random", ".", "choice", "(", "ACKS", ")" ]
Replaces an existing fact by removing it, then adding the new definition
[ "Replaces", "an", "existing", "fact", "by", "removing", "it", "then", "adding", "the", "new", "definition" ]
956b1d93abccdaaf318d7cac4451edc7e73bf5e9
https://github.com/shaunduncan/helga-facts/blob/956b1d93abccdaaf318d7cac4451edc7e73bf5e9/helga_facts.py#L89-L95
243,993
torfsen/clusterpolate
docs/conf.py
remove_module_docstring
def remove_module_docstring(app, what, name, obj, options, lines): """ Ignore the docstring of the ``clusterpolate`` module. """ if what == "module" and name == "clusterpolate": del lines[:]
python
def remove_module_docstring(app, what, name, obj, options, lines): """ Ignore the docstring of the ``clusterpolate`` module. """ if what == "module" and name == "clusterpolate": del lines[:]
[ "def", "remove_module_docstring", "(", "app", ",", "what", ",", "name", ",", "obj", ",", "options", ",", "lines", ")", ":", "if", "what", "==", "\"module\"", "and", "name", "==", "\"clusterpolate\"", ":", "del", "lines", "[", ":", "]" ]
Ignore the docstring of the ``clusterpolate`` module.
[ "Ignore", "the", "docstring", "of", "the", "clusterpolate", "module", "." ]
97a94ea662eb324f75e02f9ecc0cd70505c5db31
https://github.com/torfsen/clusterpolate/blob/97a94ea662eb324f75e02f9ecc0cd70505c5db31/docs/conf.py#L224-L229
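The handler above has the signature of a Sphinx autodoc-process-docstring callback, so the record's conf.py presumably wires it up along these lines (sketch, not taken from the record):

def setup(app):
    app.connect('autodoc-process-docstring', remove_module_docstring)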
243,994
etcher-be/emiz
emiz/avwx/translate.py
wind
def wind(direction: Number, speed: Number, gust: Number, vardir: typing.List[Number] = None, # type: ignore unit: str = 'kt', cardinals: bool = True, spoken: bool = False) -> str: """ Format wind elements into a readable sentence Returns the translation string Ex: NNE-020 (variable 010 to 040) at 14kt gusting to 20kt """ ret = '' target = 'spoken' if spoken else 'repr' # Wind direction if direction: if direction.repr in WIND_DIR_REPR: ret += WIND_DIR_REPR[direction.repr] elif direction.value is None: ret += direction.repr else: if cardinals: ret += get_cardinal_direction(direction.value) + '-' # type: ignore ret += getattr(direction, target) # Variable direction if vardir and isinstance(vardir, list): vardir = [getattr(var, target) for var in vardir] ret += ' (variable {} to {})'.format(*vardir) # Speed if speed and speed.value: ret += f' at {speed.value}{unit}' # Gust if gust and gust.value: ret += f' gusting to {gust.value}{unit}' return ret
python
def wind(direction: Number, speed: Number, gust: Number, vardir: typing.List[Number] = None, # type: ignore unit: str = 'kt', cardinals: bool = True, spoken: bool = False) -> str: """ Format wind elements into a readable sentence Returns the translation string Ex: NNE-020 (variable 010 to 040) at 14kt gusting to 20kt """ ret = '' target = 'spoken' if spoken else 'repr' # Wind direction if direction: if direction.repr in WIND_DIR_REPR: ret += WIND_DIR_REPR[direction.repr] elif direction.value is None: ret += direction.repr else: if cardinals: ret += get_cardinal_direction(direction.value) + '-' # type: ignore ret += getattr(direction, target) # Variable direction if vardir and isinstance(vardir, list): vardir = [getattr(var, target) for var in vardir] ret += ' (variable {} to {})'.format(*vardir) # Speed if speed and speed.value: ret += f' at {speed.value}{unit}' # Gust if gust and gust.value: ret += f' gusting to {gust.value}{unit}' return ret
[ "def", "wind", "(", "direction", ":", "Number", ",", "speed", ":", "Number", ",", "gust", ":", "Number", ",", "vardir", ":", "typing", ".", "List", "[", "Number", "]", "=", "None", ",", "# type: ignore", "unit", ":", "str", "=", "'kt'", ",", "cardinals", ":", "bool", "=", "True", ",", "spoken", ":", "bool", "=", "False", ")", "->", "str", ":", "ret", "=", "''", "target", "=", "'spoken'", "if", "spoken", "else", "'repr'", "# Wind direction", "if", "direction", ":", "if", "direction", ".", "repr", "in", "WIND_DIR_REPR", ":", "ret", "+=", "WIND_DIR_REPR", "[", "direction", ".", "repr", "]", "elif", "direction", ".", "value", "is", "None", ":", "ret", "+=", "direction", ".", "repr", "else", ":", "if", "cardinals", ":", "ret", "+=", "get_cardinal_direction", "(", "direction", ".", "value", ")", "+", "'-'", "# type: ignore", "ret", "+=", "getattr", "(", "direction", ",", "target", ")", "# Variable direction", "if", "vardir", "and", "isinstance", "(", "vardir", ",", "list", ")", ":", "vardir", "=", "[", "getattr", "(", "var", ",", "target", ")", "for", "var", "in", "vardir", "]", "ret", "+=", "' (variable {} to {})'", ".", "format", "(", "*", "vardir", ")", "# Speed", "if", "speed", "and", "speed", ".", "value", ":", "ret", "+=", "f' at {speed.value}{unit}'", "# Gust", "if", "gust", "and", "gust", ".", "value", ":", "ret", "+=", "f' gusting to {gust.value}{unit}'", "return", "ret" ]
Format wind elements into a readable sentence Returns the translation string Ex: NNE-020 (variable 010 to 040) at 14kt gusting to 20kt
[ "Format", "wind", "elements", "into", "a", "readable", "sentence" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/translate.py#L77-L113
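A hedged sketch reproducing the docstring's example output; the package's Number wrapper is stood in by a namedtuple exposing only the attributes the function reads (.repr, .value, .spoken), and the import path is taken from the record's path field.

from collections import namedtuple
from emiz.avwx.translate import wind  # path taken from the record

Number = namedtuple('Number', 'repr value spoken')  # stand-in for the real wrapper
print(wind(Number('020', 20, 'two zero'),
           Number('14', 14, 'one four'),
           Number('20', 20, 'two zero'),
           vardir=[Number('010', 10, 'one zero'), Number('040', 40, 'four zero')]))
# expected, per the docstring: 'NNE-020 (variable 010 to 040) at 14kt gusting to 20kt'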
243,995
etcher-be/emiz
emiz/avwx/translate.py
visibility
def visibility(vis: Number, unit: str = 'm') -> str: """ Formats a visibility element into a string with both km and sm values Ex: 8km ( 5sm ) """ if not (vis and unit in ('m', 'sm')): return '' if vis.repr in VIS_REPR: return VIS_REPR[vis.repr] if unit == 'm': converted = vis.value * 0.000621371 converted = str(round(converted, 1)).replace('.0', '') + 'sm' # type: ignore value = str(round(vis.value / 1000, 1)).replace('.0', '') unit = 'km' elif unit == 'sm': converted = vis.value / 0.621371 converted = str(round(converted, 1)).replace('.0', '') + 'km' # type: ignore value = str(vis.value).replace('.0', '') return f'{value}{unit} ({converted})'
python
def visibility(vis: Number, unit: str = 'm') -> str: """ Formats a visibility element into a string with both km and sm values Ex: 8km ( 5sm ) """ if not (vis and unit in ('m', 'sm')): return '' if vis.repr in VIS_REPR: return VIS_REPR[vis.repr] if unit == 'm': converted = vis.value * 0.000621371 converted = str(round(converted, 1)).replace('.0', '') + 'sm' # type: ignore value = str(round(vis.value / 1000, 1)).replace('.0', '') unit = 'km' elif unit == 'sm': converted = vis.value / 0.621371 converted = str(round(converted, 1)).replace('.0', '') + 'km' # type: ignore value = str(vis.value).replace('.0', '') return f'{value}{unit} ({converted})'
[ "def", "visibility", "(", "vis", ":", "Number", ",", "unit", ":", "str", "=", "'m'", ")", "->", "str", ":", "if", "not", "(", "vis", "and", "unit", "in", "(", "'m'", ",", "'sm'", ")", ")", ":", "return", "''", "if", "vis", ".", "repr", "in", "VIS_REPR", ":", "return", "VIS_REPR", "[", "vis", ".", "repr", "]", "if", "unit", "==", "'m'", ":", "converted", "=", "vis", ".", "value", "*", "0.000621371", "converted", "=", "str", "(", "round", "(", "converted", ",", "1", ")", ")", ".", "replace", "(", "'.0'", ",", "''", ")", "+", "'sm'", "# type: ignore", "value", "=", "str", "(", "round", "(", "vis", ".", "value", "/", "1000", ",", "1", ")", ")", ".", "replace", "(", "'.0'", ",", "''", ")", "unit", "=", "'km'", "elif", "unit", "==", "'sm'", ":", "converted", "=", "vis", ".", "value", "/", "0.621371", "converted", "=", "str", "(", "round", "(", "converted", ",", "1", ")", ")", ".", "replace", "(", "'.0'", ",", "''", ")", "+", "'km'", "# type: ignore", "value", "=", "str", "(", "vis", ".", "value", ")", ".", "replace", "(", "'.0'", ",", "''", ")", "return", "f'{value}{unit} ({converted})'" ]
Formats a visibility element into a string with both km and sm values Ex: 8km ( 5sm )
[ "Formats", "a", "visibility", "element", "into", "a", "string", "with", "both", "km", "and", "sm", "values" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/translate.py#L122-L141
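The metre branch of the visibility record above reduces to two conversions (metres to statute miles and metres to kilometres). The standalone trace below copies that arithmetic verbatim; the two-field namedtuple is an assumed stand-in for the library's Number, since only repr and value are accessed.

# Standalone trace of visibility()'s metre branch; illustration only.
from collections import namedtuple

Number = namedtuple('Number', 'repr value')  # stand-in, not the library type

vis = Number('8000', 8000)  # 8000 m of reported visibility
converted = str(round(vis.value * 0.000621371, 1)).replace('.0', '') + 'sm'  # -> '5sm'
value = str(round(vis.value / 1000, 1)).replace('.0', '')                    # -> '8'
print(f'{value}km ({converted})')  # 8km (5sm), matching the docstring example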
243,996
etcher-be/emiz
emiz/avwx/translate.py
temperature
def temperature(temp: Number, unit: str = 'C') -> str: """ Formats a temperature element into a string with both C and F values Used for both Temp and Dew Ex: 34°C (93°F) """ unit = unit.upper() if not (temp and unit in ('C', 'F')): return '' if unit == 'C': converted = temp.value * 1.8 + 32 converted = str(int(round(converted))) + '°F' # type: ignore elif unit == 'F': converted = (temp.value - 32) / 1.8 converted = str(int(round(converted))) + '°C' # type: ignore return f'{temp.value}°{unit} ({converted})'
python
def temperature(temp: Number, unit: str = 'C') -> str: """ Formats a temperature element into a string with both C and F values Used for both Temp and Dew Ex: 34°C (93°F) """ unit = unit.upper() if not (temp and unit in ('C', 'F')): return '' if unit == 'C': converted = temp.value * 1.8 + 32 converted = str(int(round(converted))) + '°F' # type: ignore elif unit == 'F': converted = (temp.value - 32) / 1.8 converted = str(int(round(converted))) + '°C' # type: ignore return f'{temp.value}°{unit} ({converted})'
[ "def", "temperature", "(", "temp", ":", "Number", ",", "unit", ":", "str", "=", "'C'", ")", "->", "str", ":", "unit", "=", "unit", ".", "upper", "(", ")", "if", "not", "(", "temp", "and", "unit", "in", "(", "'C'", ",", "'F'", ")", ")", ":", "return", "''", "if", "unit", "==", "'C'", ":", "converted", "=", "temp", ".", "value", "*", "1.8", "+", "32", "converted", "=", "str", "(", "int", "(", "round", "(", "converted", ")", ")", ")", "+", "'°F' ", " type: ignore", "elif", "unit", "==", "'F'", ":", "converted", "=", "(", "temp", ".", "value", "-", "32", ")", "/", "1.8", "converted", "=", "str", "(", "int", "(", "round", "(", "converted", ")", ")", ")", "+", "'°C' ", " type: ignore", "return", "f'{temp.value}°{unit} ({converted})'" ]
Formats a temperature element into a string with both C and F values Used for both Temp and Dew Ex: 34°C (93°F)
[ "Formats", "a", "temperature", "element", "into", "a", "string", "with", "both", "C", "and", "F", "values" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/translate.py#L144-L161
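A worked trace of the Celsius branch in the temperature record above; the stand-in Number is hypothetical and only mirrors the repr and value attributes the function reads.

# Standalone trace of temperature()'s Celsius branch; illustration only.
from collections import namedtuple

Number = namedtuple('Number', 'repr value')  # stand-in, not the library type

temp = Number('34', 34)
converted = str(int(round(temp.value * 1.8 + 32))) + '°F'  # 34 * 1.8 + 32 = 93.2 -> '93°F'
print(f'{temp.value}°C ({converted})')                     # 34°C (93°F), as in the docstring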
243,997
etcher-be/emiz
emiz/avwx/translate.py
altimeter
def altimeter(alt: Number, unit: str = 'hPa') -> str: """ Formats the altimter element into a string with hPa and inHg values Ex: 30.11 inHg (10.20 hPa) """ if not (alt and unit in ('hPa', 'inHg')): return '' if unit == 'hPa': value = alt.repr converted = alt.value / 33.8638866667 converted = str(round(converted, 2)) + ' inHg' # type: ignore elif unit == 'inHg': value = alt.repr[:2] + '.' + alt.repr[2:] converted = float(value) * 33.8638866667 converted = str(int(round(converted))) + ' hPa' # type: ignore return f'{value} {unit} ({converted})'
python
def altimeter(alt: Number, unit: str = 'hPa') -> str: """ Formats the altimter element into a string with hPa and inHg values Ex: 30.11 inHg (10.20 hPa) """ if not (alt and unit in ('hPa', 'inHg')): return '' if unit == 'hPa': value = alt.repr converted = alt.value / 33.8638866667 converted = str(round(converted, 2)) + ' inHg' # type: ignore elif unit == 'inHg': value = alt.repr[:2] + '.' + alt.repr[2:] converted = float(value) * 33.8638866667 converted = str(int(round(converted))) + ' hPa' # type: ignore return f'{value} {unit} ({converted})'
[ "def", "altimeter", "(", "alt", ":", "Number", ",", "unit", ":", "str", "=", "'hPa'", ")", "->", "str", ":", "if", "not", "(", "alt", "and", "unit", "in", "(", "'hPa'", ",", "'inHg'", ")", ")", ":", "return", "''", "if", "unit", "==", "'hPa'", ":", "value", "=", "alt", ".", "repr", "converted", "=", "alt", ".", "value", "/", "33.8638866667", "converted", "=", "str", "(", "round", "(", "converted", ",", "2", ")", ")", "+", "' inHg'", "# type: ignore", "elif", "unit", "==", "'inHg'", ":", "value", "=", "alt", ".", "repr", "[", ":", "2", "]", "+", "'.'", "+", "alt", ".", "repr", "[", "2", ":", "]", "converted", "=", "float", "(", "value", ")", "*", "33.8638866667", "converted", "=", "str", "(", "int", "(", "round", "(", "converted", ")", ")", ")", "+", "' hPa'", "# type: ignore", "return", "f'{value} {unit} ({converted})'" ]
Formats the altimter element into a string with hPa and inHg values Ex: 30.11 inHg (10.20 hPa)
[ "Formats", "the", "altimter", "element", "into", "a", "string", "with", "hPa", "and", "inHg", "values" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/translate.py#L164-L180
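The hPa branch of the altimeter record divides by 33.8638866667 (hPa per inHg). The sketch below traces that path with an assumed stand-in Number and shows what the code actually produces for a 1013 hPa setting.

# Standalone trace of altimeter()'s hPa branch; illustration only.
from collections import namedtuple

Number = namedtuple('Number', 'repr value')  # stand-in, not the library type

alt = Number('1013', 1013)
converted = str(round(alt.value / 33.8638866667, 2)) + ' inHg'  # -> '29.91 inHg'
print(f'{alt.repr} hPa ({converted})')                          # 1013 hPa (29.91 inHg)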
243,998
etcher-be/emiz
emiz/avwx/translate.py
clouds
def clouds(clds: [Cloud], unit: str = 'ft') -> str: # type: ignore """ Format cloud list into a readable sentence Returns the translation string Ex: Broken layer at 2200ft (Cumulonimbus), Overcast layer at 3600ft - Reported AGL """ if clds is None: return '' ret = [] for cloud in clds: if cloud.altitude is None: continue cloud_str = CLOUD_TRANSLATIONS[cloud.type] if cloud.modifier: cloud_str += f' ({CLOUD_TRANSLATIONS[cloud.modifier]})' ret.append(cloud_str.format(cloud.altitude * 100, unit)) if ret: return ', '.join(ret) + ' - Reported AGL' return 'Sky clear'
python
def clouds(clds: [Cloud], unit: str = 'ft') -> str: # type: ignore """ Format cloud list into a readable sentence Returns the translation string Ex: Broken layer at 2200ft (Cumulonimbus), Overcast layer at 3600ft - Reported AGL """ if clds is None: return '' ret = [] for cloud in clds: if cloud.altitude is None: continue cloud_str = CLOUD_TRANSLATIONS[cloud.type] if cloud.modifier: cloud_str += f' ({CLOUD_TRANSLATIONS[cloud.modifier]})' ret.append(cloud_str.format(cloud.altitude * 100, unit)) if ret: return ', '.join(ret) + ' - Reported AGL' return 'Sky clear'
[ "def", "clouds", "(", "clds", ":", "[", "Cloud", "]", ",", "unit", ":", "str", "=", "'ft'", ")", "->", "str", ":", "# type: ignore", "if", "clds", "is", "None", ":", "return", "''", "ret", "=", "[", "]", "for", "cloud", "in", "clds", ":", "if", "cloud", ".", "altitude", "is", "None", ":", "continue", "cloud_str", "=", "CLOUD_TRANSLATIONS", "[", "cloud", ".", "type", "]", "if", "cloud", ".", "modifier", ":", "cloud_str", "+=", "f' ({CLOUD_TRANSLATIONS[cloud.modifier]})'", "ret", ".", "append", "(", "cloud_str", ".", "format", "(", "cloud", ".", "altitude", "*", "100", ",", "unit", ")", ")", "if", "ret", ":", "return", "', '", ".", "join", "(", "ret", ")", "+", "' - Reported AGL'", "return", "'Sky clear'" ]
Format cloud list into a readable sentence Returns the translation string Ex: Broken layer at 2200ft (Cumulonimbus), Overcast layer at 3600ft - Reported AGL
[ "Format", "cloud", "list", "into", "a", "readable", "sentence" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/translate.py#L183-L203
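A hedged usage sketch for the clouds record above, assuming emiz is installed and that its Cloud objects can be duck-typed by anything exposing type, modifier and altitude; the Cloud namedtuple is a hypothetical stand-in, and the expected output follows the record's docstring example.

# Hypothetical call into clouds(); the Cloud stand-in is an assumption, not the library class.
from collections import namedtuple

from emiz.avwx.translate import clouds

Cloud = namedtuple('Cloud', 'type modifier altitude')  # stand-in for emiz.avwx's Cloud

layers = [Cloud('BKN', 'CB', 22), Cloud('OVC', None, 36)]
print(clouds(layers, unit='ft'))
# Expected, per the record's docstring example:
# Broken layer at 2200ft (Cumulonimbus), Overcast layer at 3600ft - Reported AGL

print(clouds([]))  # empty list falls through to the default -> 'Sky clear'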
243,999
etcher-be/emiz
emiz/avwx/translate.py
wxcode
def wxcode(code: str) -> str: """ Translates weather codes into readable strings Returns translated string of variable length """ if not code: return '' ret = '' if code[0] == '+': ret = 'Heavy ' code = code[1:] elif code[0] == '-': ret = 'Light ' code = code[1:] # Return code if code is not a code, ex R03/03002V03 if len(code) not in [2, 4, 6]: return code for _ in range(len(code) // 2): if code[:2] in WX_TRANSLATIONS: ret += WX_TRANSLATIONS[code[:2]] + ' ' else: ret += code[:2] code = code[2:] return ret.strip()
python
def wxcode(code: str) -> str: """ Translates weather codes into readable strings Returns translated string of variable length """ if not code: return '' ret = '' if code[0] == '+': ret = 'Heavy ' code = code[1:] elif code[0] == '-': ret = 'Light ' code = code[1:] # Return code if code is not a code, ex R03/03002V03 if len(code) not in [2, 4, 6]: return code for _ in range(len(code) // 2): if code[:2] in WX_TRANSLATIONS: ret += WX_TRANSLATIONS[code[:2]] + ' ' else: ret += code[:2] code = code[2:] return ret.strip()
[ "def", "wxcode", "(", "code", ":", "str", ")", "->", "str", ":", "if", "not", "code", ":", "return", "''", "ret", "=", "''", "if", "code", "[", "0", "]", "==", "'+'", ":", "ret", "=", "'Heavy '", "code", "=", "code", "[", "1", ":", "]", "elif", "code", "[", "0", "]", "==", "'-'", ":", "ret", "=", "'Light '", "code", "=", "code", "[", "1", ":", "]", "# Return code if code is not a code, ex R03/03002V03", "if", "len", "(", "code", ")", "not", "in", "[", "2", ",", "4", ",", "6", "]", ":", "return", "code", "for", "_", "in", "range", "(", "len", "(", "code", ")", "//", "2", ")", ":", "if", "code", "[", ":", "2", "]", "in", "WX_TRANSLATIONS", ":", "ret", "+=", "WX_TRANSLATIONS", "[", "code", "[", ":", "2", "]", "]", "+", "' '", "else", ":", "ret", "+=", "code", "[", ":", "2", "]", "code", "=", "code", "[", "2", ":", "]", "return", "ret", ".", "strip", "(", ")" ]
Translates weather codes into readable strings Returns translated string of variable length
[ "Translates", "weather", "codes", "into", "readable", "strings" ]
1c3e32711921d7e600e85558ffe5d337956372de
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/translate.py#L206-L230
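wxcode operates on plain strings, so it can be exercised directly, assuming emiz is installed; the expected translations below also assume the module's WX_TRANSLATIONS table uses the standard METAR meanings for TS and RA.

# Hypothetical calls into wxcode(); expected outputs assume standard METAR code translations.
from emiz.avwx.translate import wxcode

print(wxcode('+TSRA'))         # expected 'Heavy Thunderstorm Rain'
print(wxcode('-RA'))           # expected 'Light Rain'
print(wxcode('R03/03002V03'))  # length is not 2, 4 or 6, so the string is returned unchanged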