repository_name
stringlengths
7
55
func_path_in_repository
stringlengths
4
223
func_name
stringlengths
1
134
whole_func_string
stringlengths
75
104k
language
stringclasses
1 value
func_code_string
stringlengths
75
104k
func_code_tokens
listlengths
19
28.4k
func_documentation_string
stringlengths
1
46.9k
func_documentation_tokens
listlengths
1
1.97k
split_name
stringclasses
1 value
func_code_url
stringlengths
87
315
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.get_version
def get_version(self, service_id, version_number):
    """Get the version for a particular service.

    :param service_id: id of the service the version belongs to.
    :param version_number: integer version number to retrieve.
    :return: a ``FastlyVersion`` built from the API response.
    """
    endpoint = "/service/%s/version/%d" % (service_id, version_number)
    return FastlyVersion(self, self._fetch(endpoint))
python
def get_version(self, service_id, version_number): """Get the version for a particular service.""" content = self._fetch("/service/%s/version/%d" % (service_id, version_number)) return FastlyVersion(self, content)
[ "def", "get_version", "(", "self", ",", "service_id", ",", "version_number", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d\"", "%", "(", "service_id", ",", "version_number", ")", ")", "return", "FastlyVersion", "(", "self", ",", "content", ")" ]
Get the version for a particular service.
[ "Get", "the", "version", "for", "a", "particular", "service", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L941-L944
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.update_version
def update_version(self, service_id, version_number, **kwargs):
    """Update a particular version for a particular service.

    Keyword arguments are filtered through ``FastlyVersion.FIELDS``
    before being submitted as the PUT body.
    """
    form_body = self._formdata(kwargs, FastlyVersion.FIELDS)
    endpoint = "/service/%s/version/%d/" % (service_id, version_number)
    response = self._fetch(endpoint, method="PUT", body=form_body)
    return FastlyVersion(self, response)
python
def update_version(self, service_id, version_number, **kwargs): """Update a particular version for a particular service.""" body = self._formdata(kwargs, FastlyVersion.FIELDS) content = self._fetch("/service/%s/version/%d/" % (service_id, version_number), method="PUT", body=body) return FastlyVersion(self, content)
[ "def", "update_version", "(", "self", ",", "service_id", ",", "version_number", ",", "*", "*", "kwargs", ")", ":", "body", "=", "self", ".", "_formdata", "(", "kwargs", ",", "FastlyVersion", ".", "FIELDS", ")", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/\"", "%", "(", "service_id", ",", "version_number", ")", ",", "method", "=", "\"PUT\"", ",", "body", "=", "body", ")", "return", "FastlyVersion", "(", "self", ",", "content", ")" ]
Update a particular version for a particular service.
[ "Update", "a", "particular", "version", "for", "a", "particular", "service", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L946-L950
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.clone_version
def clone_version(self, service_id, version_number):
    """Clone the current configuration into a new version.

    Issues a PUT against the ``/clone`` sub-resource and wraps the
    response in a ``FastlyVersion``.
    """
    endpoint = "/service/%s/version/%d/clone" % (service_id, version_number)
    return FastlyVersion(self, self._fetch(endpoint, method="PUT"))
python
def clone_version(self, service_id, version_number): """Clone the current configuration into a new version.""" content = self._fetch("/service/%s/version/%d/clone" % (service_id, version_number), method="PUT") return FastlyVersion(self, content)
[ "def", "clone_version", "(", "self", ",", "service_id", ",", "version_number", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/clone\"", "%", "(", "service_id", ",", "version_number", ")", ",", "method", "=", "\"PUT\"", ")", "return", "FastlyVersion", "(", "self", ",", "content", ")" ]
Clone the current configuration into a new version.
[ "Clone", "the", "current", "configuration", "into", "a", "new", "version", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L953-L956
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.activate_version
def activate_version(self, service_id, version_number):
    """Activate the current version.

    Issues a PUT against the ``/activate`` sub-resource and wraps the
    response in a ``FastlyVersion``.
    """
    endpoint = "/service/%s/version/%d/activate" % (service_id, version_number)
    return FastlyVersion(self, self._fetch(endpoint, method="PUT"))
python
def activate_version(self, service_id, version_number): """Activate the current version.""" content = self._fetch("/service/%s/version/%d/activate" % (service_id, version_number), method="PUT") return FastlyVersion(self, content)
[ "def", "activate_version", "(", "self", ",", "service_id", ",", "version_number", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/activate\"", "%", "(", "service_id", ",", "version_number", ")", ",", "method", "=", "\"PUT\"", ")", "return", "FastlyVersion", "(", "self", ",", "content", ")" ]
Activate the current version.
[ "Activate", "the", "current", "version", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L959-L962
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.deactivate_version
def deactivate_version(self, service_id, version_number):
    """Deactivate the current version.

    Issues a PUT against the ``/deactivate`` sub-resource and wraps the
    response in a ``FastlyVersion``.
    """
    endpoint = "/service/%s/version/%d/deactivate" % (service_id, version_number)
    return FastlyVersion(self, self._fetch(endpoint, method="PUT"))
python
def deactivate_version(self, service_id, version_number): """Deactivate the current version.""" content = self._fetch("/service/%s/version/%d/deactivate" % (service_id, version_number), method="PUT") return FastlyVersion(self, content)
[ "def", "deactivate_version", "(", "self", ",", "service_id", ",", "version_number", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/deactivate\"", "%", "(", "service_id", ",", "version_number", ")", ",", "method", "=", "\"PUT\"", ")", "return", "FastlyVersion", "(", "self", ",", "content", ")" ]
Deactivate the current version.
[ "Deactivate", "the", "current", "version", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L965-L968
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.validate_version
def validate_version(self, service_id, version_number):
    """Validate the version for a particular service and version.

    Returns the status extracted from the API response rather than a
    model object.
    """
    endpoint = "/service/%s/version/%d/validate" % (service_id, version_number)
    return self._status(self._fetch(endpoint))
python
def validate_version(self, service_id, version_number): """Validate the version for a particular service and version.""" content = self._fetch("/service/%s/version/%d/validate" % (service_id, version_number)) return self._status(content)
[ "def", "validate_version", "(", "self", ",", "service_id", ",", "version_number", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/validate\"", "%", "(", "service_id", ",", "version_number", ")", ")", "return", "self", ".", "_status", "(", "content", ")" ]
Validate the version for a particular service and version.
[ "Validate", "the", "version", "for", "a", "particular", "service", "and", "version", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L971-L974
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.lock_version
def lock_version(self, service_id, version_number):
    """Locks the specified version.

    Returns the status extracted from the API response.
    """
    # NOTE(review): this request is issued as a GET (the default for
    # _fetch here), whereas Fastly's API documents version locking as a
    # PUT to /service/{id}/version/{n}/lock -- confirm before relying
    # on this call.
    endpoint = "/service/%s/version/%d/lock" % (service_id, version_number)
    return self._status(self._fetch(endpoint))
python
def lock_version(self, service_id, version_number): """Locks the specified version.""" content = self._fetch("/service/%s/version/%d/lock" % (service_id, version_number)) return self._status(content)
[ "def", "lock_version", "(", "self", ",", "service_id", ",", "version_number", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/lock\"", "%", "(", "service_id", ",", "version_number", ")", ")", "return", "self", ".", "_status", "(", "content", ")" ]
Locks the specified version.
[ "Locks", "the", "specified", "version", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L977-L980
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.list_wordpressess
def list_wordpressess(self, service_id, version_number):
    """Get all of the wordpresses for a specified service and version."""
    endpoint = "/service/%s/version/%d/wordpress" % (service_id, version_number)
    response = self._fetch(endpoint)
    # map() is kept (rather than a list comprehension) so the return
    # type stays identical to the original implementation.
    return map(lambda item: FastlyWordpress(self, item), response)
python
def list_wordpressess(self, service_id, version_number): """Get all of the wordpresses for a specified service and version.""" content = self._fetch("/service/%s/version/%d/wordpress" % (service_id, version_number)) return map(lambda x: FastlyWordpress(self, x), content)
[ "def", "list_wordpressess", "(", "self", ",", "service_id", ",", "version_number", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/wordpress\"", "%", "(", "service_id", ",", "version_number", ")", ")", "return", "map", "(", "lambda", "x", ":", "FastlyWordpress", "(", "self", ",", "x", ")", ",", "content", ")" ]
Get all of the wordpresses for a specified service and version.
[ "Get", "all", "of", "the", "wordpresses", "for", "a", "specified", "service", "and", "version", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L983-L986
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.create_wordpress
def create_wordpress(self, service_id, version_number, name, path, comment=None):
    """Create a wordpress for the specified service and version.

    :param name: name for the new wordpress.
    :param path: path the wordpress is served from.
    :param comment: optional free-form comment.
    """
    form_body = self._formdata(
        {
            "name": name,
            "path": path,
            "comment": comment,
        },
        FastlyWordpress.FIELDS,
    )
    endpoint = "/service/%s/version/%d/wordpress" % (service_id, version_number)
    response = self._fetch(endpoint, method="POST", body=form_body)
    return FastlyWordpress(self, response)
python
def create_wordpress(self, service_id, version_number, name, path, comment=None): """Create a wordpress for the specified service and version.""" body = self._formdata({ "name": name, "path": path, "comment": comment, }, FastlyWordpress.FIELDS) content = self._fetch("/service/%s/version/%d/wordpress" % (service_id, version_number), method="POST", body=body) return FastlyWordpress(self, content)
[ "def", "create_wordpress", "(", "self", ",", "service_id", ",", "version_number", ",", "name", ",", "path", ",", "comment", "=", "None", ")", ":", "body", "=", "self", ".", "_formdata", "(", "{", "\"name\"", ":", "name", ",", "\"path\"", ":", "path", ",", "\"comment\"", ":", "comment", ",", "}", ",", "FastlyWordpress", ".", "FIELDS", ")", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/wordpress\"", "%", "(", "service_id", ",", "version_number", ")", ",", "method", "=", "\"POST\"", ",", "body", "=", "body", ")", "return", "FastlyWordpress", "(", "self", ",", "content", ")" ]
Create a wordpress for the specified service and version.
[ "Create", "a", "wordpress", "for", "the", "specified", "service", "and", "version", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L989-L1002
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.get_wordpress
def get_wordpress(self, service_id, version_number, name):
    """Get information on a specific wordpress."""
    endpoint = "/service/%s/version/%d/wordpress/%s" % (
        service_id,
        version_number,
        name,
    )
    return FastlyWordpress(self, self._fetch(endpoint))
python
def get_wordpress(self, service_id, version_number, name): """Get information on a specific wordpress.""" content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name)) return FastlyWordpress(self, content)
[ "def", "get_wordpress", "(", "self", ",", "service_id", ",", "version_number", ",", "name", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/wordpress/%s\"", "%", "(", "service_id", ",", "version_number", ",", "name", ")", ")", "return", "FastlyWordpress", "(", "self", ",", "content", ")" ]
Get information on a specific wordpress.
[ "Get", "information", "on", "a", "specific", "wordpress", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L1005-L1008
obulpathi/cdn-fastly-python
fastly/__init__.py
FastlyConnection.update_wordpress
def update_wordpress(self, service_id, version_number, name_key, **kwargs):
    """Update a specified wordpress.

    Keyword arguments are filtered through ``FastlyWordpress.FIELDS``
    before being submitted as the PUT body.
    """
    form_body = self._formdata(kwargs, FastlyWordpress.FIELDS)
    endpoint = "/service/%s/version/%d/wordpress/%s" % (
        service_id,
        version_number,
        name_key,
    )
    response = self._fetch(endpoint, method="PUT", body=form_body)
    return FastlyWordpress(self, response)
python
def update_wordpress(self, service_id, version_number, name_key, **kwargs): """Update a specified wordpress.""" body = self._formdata(kwargs, FastlyWordpress.FIELDS) content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name_key), method="PUT", body=body) return FastlyWordpress(self, content)
[ "def", "update_wordpress", "(", "self", ",", "service_id", ",", "version_number", ",", "name_key", ",", "*", "*", "kwargs", ")", ":", "body", "=", "self", ".", "_formdata", "(", "kwargs", ",", "FastlyWordpress", ".", "FIELDS", ")", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/wordpress/%s\"", "%", "(", "service_id", ",", "version_number", ",", "name_key", ")", ",", "method", "=", "\"PUT\"", ",", "body", "=", "body", ")", "return", "FastlyWordpress", "(", "self", ",", "content", ")" ]
Update a specified wordpress.
[ "Update", "a", "specified", "wordpress", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L1011-L1015
tsnaomi/finnsyll
finnsyll/syllabifier.py
FinnSyll.annotate
def annotate(self, word):
    '''Annotate 'word' for syllabification, stress, weights, and vowels.'''
    info = []  # e.g., [ ('\'nak.su.`tus.ta', 'PUSU', 'HLHL', 'AUUA'), ]

    # Each stressed syllabification produced for the (normalized) word
    # yields one tuple of parallel per-syllable annotation strings.
    for syllabification, _ in syllabify(self.normalize(word), stress=True):
        stresses = ''
        weights = ''
        vowels = ''

        for syll in syllable_split(syllabification):
            try:
                # One character per syllable: its vowel class, its
                # weight, and its stress (P=primary, S=secondary,
                # U=unstressed, keyed off the leading stress mark).
                vowels += get_vowel(syll)
                weights += get_weight(syll)
                stresses += {'\'': 'P', '`': 'S'}.get(syll[0], 'U')
            except AttributeError:
                # if the syllable is vowel-less...
                # get_vowel/get_weight presumably returned None here
                # (no vowel matched) -- TODO confirm against helpers.
                if syll[-1].isalpha():
                    # alphabetic but vowel-less chunk: mark unknown
                    stresses += '*'
                    weights += '*'
                    vowels += '*'
                else:
                    # punctuation/delimiter chunk: keep columns aligned
                    stresses += ' '
                    weights += ' '
                    vowels += ' '

        info.append((
            syllabification,
            stresses,
            weights,
            vowels,
        ))

    return info
python
def annotate(self, word): '''Annotate 'word' for syllabification, stress, weights, and vowels.''' info = [] # e.g., [ ('\'nak.su.`tus.ta', 'PUSU', 'HLHL', 'AUUA'), ] for syllabification, _ in syllabify(self.normalize(word), stress=True): stresses = '' weights = '' vowels = '' for syll in syllable_split(syllabification): try: vowels += get_vowel(syll) weights += get_weight(syll) stresses += {'\'': 'P', '`': 'S'}.get(syll[0], 'U') except AttributeError: # if the syllable is vowel-less... if syll[-1].isalpha(): stresses += '*' weights += '*' vowels += '*' else: stresses += ' ' weights += ' ' vowels += ' ' info.append(( syllabification, stresses, weights, vowels, )) return info
[ "def", "annotate", "(", "self", ",", "word", ")", ":", "info", "=", "[", "]", "# e.g., [ ('\\'nak.su.`tus.ta', 'PUSU', 'HLHL', 'AUUA'), ]", "for", "syllabification", ",", "_", "in", "syllabify", "(", "self", ".", "normalize", "(", "word", ")", ",", "stress", "=", "True", ")", ":", "stresses", "=", "''", "weights", "=", "''", "vowels", "=", "''", "for", "syll", "in", "syllable_split", "(", "syllabification", ")", ":", "try", ":", "vowels", "+=", "get_vowel", "(", "syll", ")", "weights", "+=", "get_weight", "(", "syll", ")", "stresses", "+=", "{", "'\\''", ":", "'P'", ",", "'`'", ":", "'S'", "}", ".", "get", "(", "syll", "[", "0", "]", ",", "'U'", ")", "except", "AttributeError", ":", "# if the syllable is vowel-less...", "if", "syll", "[", "-", "1", "]", ".", "isalpha", "(", ")", ":", "stresses", "+=", "'*'", "weights", "+=", "'*'", "vowels", "+=", "'*'", "else", ":", "stresses", "+=", "' '", "weights", "+=", "' '", "vowels", "+=", "' '", "info", ".", "append", "(", "(", "syllabification", ",", "stresses", ",", "weights", ",", "vowels", ",", ")", ")", "return", "info" ]
Annotate 'word' for syllabification, stress, weights, and vowels.
[ "Annotate", "word", "for", "syllabification", "stress", "weights", "and", "vowels", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/syllabifier.py#L114-L150
OLC-Bioinformatics/sipprverse
serosippr/serosippr.py
SeroSippr.runner
def runner(self):
    """
    Run the necessary methods in the correct order
    """
    logging.info('Starting {} analysis pipeline'.format(self.analysistype))
    # Run the analyses
    Sippr(self, self.cutoff)
    self.serotype_escherichia()
    self.serotype_salmonella()
    # Create the reports
    self.reporter()
    # Print the metadata
    metadataprinter.MetadataPrinter(self)
python
def runner(self): """ Run the necessary methods in the correct order """ logging.info('Starting {} analysis pipeline'.format(self.analysistype)) # Run the analyses Sippr(self, self.cutoff) self.serotype_escherichia() self.serotype_salmonella() # Create the reports self.reporter() # Print the metadata metadataprinter.MetadataPrinter(self)
[ "def", "runner", "(", "self", ")", ":", "logging", ".", "info", "(", "'Starting {} analysis pipeline'", ".", "format", "(", "self", ".", "analysistype", ")", ")", "# Run the analyses", "Sippr", "(", "self", ",", "self", ".", "cutoff", ")", "self", ".", "serotype_escherichia", "(", ")", "self", ".", "serotype_salmonella", "(", ")", "# Create the reports", "self", ".", "reporter", "(", ")", "# Print the metadata", "metadataprinter", ".", "MetadataPrinter", "(", "self", ")" ]
Run the necessary methods in the correct order
[ "Run", "the", "necessary", "methods", "in", "the", "correct", "order" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/serosippr/serosippr.py#L16-L28
OLC-Bioinformatics/sipprverse
serosippr/serosippr.py
SeroSippr.reporter
def reporter(self):
    """
    Creates a report of the results
    """
    logging.info('Creating {} report'.format(self.analysistype))
    # Create the path in which the reports are stored
    make_path(self.reportpath)
    header = 'Strain,Serotype\n'
    data = ''
    # One CSV named after the analysis type, one row per typed sample.
    with open(os.path.join(self.reportpath, '{}.csv'.format(self.analysistype)), 'w') as report:
        for sample in self.runmetadata.samples:
            # Samples without a best assembly are skipped entirely
            # (no row is written for them).
            if sample.general.bestassemblyfile != 'NA':
                data += sample.name + ','
                if sample[self.analysistype].results:
                    # Set the O-type as either the appropriate attribute, or O-untypable
                    if ';'.join(sample.serosippr.o_set) == '-':
                        otype = 'O-untypeable'
                    else:
                        otype = '{oset} ({opid})'.format(oset=';'.join(sample.serosippr.o_set),
                                                         opid=sample.serosippr.best_o_pid)
                    # Same as above, but for the H-type
                    if ';'.join(sample.serosippr.h_set) == '-':
                        htype = 'H-untypeable'
                    else:
                        htype = '{hset} ({hpid})'.format(hset=';'.join(sample.serosippr.h_set),
                                                         hpid=sample.serosippr.best_h_pid)
                    serotype = '{otype}:{htype}'.format(otype=otype, htype=htype)
                    # Populate the data string
                    # Fully untypeable samples are reported as 'ND'.
                    data += serotype if serotype != 'O-untypeable:H-untypeable' else 'ND'
                    data += '\n'
                else:
                    # No results: row has the name and an empty serotype.
                    data += '\n'
        report.write(header)
        report.write(data)
python
def reporter(self): """ Creates a report of the results """ logging.info('Creating {} report'.format(self.analysistype)) # Create the path in which the reports are stored make_path(self.reportpath) header = 'Strain,Serotype\n' data = '' with open(os.path.join(self.reportpath, '{}.csv'.format(self.analysistype)), 'w') as report: for sample in self.runmetadata.samples: if sample.general.bestassemblyfile != 'NA': data += sample.name + ',' if sample[self.analysistype].results: # Set the O-type as either the appropriate attribute, or O-untypable if ';'.join(sample.serosippr.o_set) == '-': otype = 'O-untypeable' else: otype = '{oset} ({opid})'.format(oset=';'.join(sample.serosippr.o_set), opid=sample.serosippr.best_o_pid) # Same as above, but for the H-type if ';'.join(sample.serosippr.h_set) == '-': htype = 'H-untypeable' else: htype = '{hset} ({hpid})'.format(hset=';'.join(sample.serosippr.h_set), hpid=sample.serosippr.best_h_pid) serotype = '{otype}:{htype}'.format(otype=otype, htype=htype) # Populate the data string data += serotype if serotype != 'O-untypeable:H-untypeable' else 'ND' data += '\n' else: data += '\n' report.write(header) report.write(data)
[ "def", "reporter", "(", "self", ")", ":", "logging", ".", "info", "(", "'Creating {} report'", ".", "format", "(", "self", ".", "analysistype", ")", ")", "# Create the path in which the reports are stored", "make_path", "(", "self", ".", "reportpath", ")", "header", "=", "'Strain,Serotype\\n'", "data", "=", "''", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportpath", ",", "'{}.csv'", ".", "format", "(", "self", ".", "analysistype", ")", ")", ",", "'w'", ")", "as", "report", ":", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "data", "+=", "sample", ".", "name", "+", "','", "if", "sample", "[", "self", ".", "analysistype", "]", ".", "results", ":", "# Set the O-type as either the appropriate attribute, or O-untypable", "if", "';'", ".", "join", "(", "sample", ".", "serosippr", ".", "o_set", ")", "==", "'-'", ":", "otype", "=", "'O-untypeable'", "else", ":", "otype", "=", "'{oset} ({opid})'", ".", "format", "(", "oset", "=", "';'", ".", "join", "(", "sample", ".", "serosippr", ".", "o_set", ")", ",", "opid", "=", "sample", ".", "serosippr", ".", "best_o_pid", ")", "# Same as above, but for the H-type", "if", "';'", ".", "join", "(", "sample", ".", "serosippr", ".", "h_set", ")", "==", "'-'", ":", "htype", "=", "'H-untypeable'", "else", ":", "htype", "=", "'{hset} ({hpid})'", ".", "format", "(", "hset", "=", "';'", ".", "join", "(", "sample", ".", "serosippr", ".", "h_set", ")", ",", "hpid", "=", "sample", ".", "serosippr", ".", "best_h_pid", ")", "serotype", "=", "'{otype}:{htype}'", ".", "format", "(", "otype", "=", "otype", ",", "htype", "=", "htype", ")", "# Populate the data string", "data", "+=", "serotype", "if", "serotype", "!=", "'O-untypeable:H-untypeable'", "else", "'ND'", "data", "+=", "'\\n'", "else", ":", "data", "+=", "'\\n'", "report", ".", "write", "(", "header", ")", "report", ".", "write", "(", "data", ")" ]
Creates a report of the results
[ "Creates", "a", "report", "of", "the", "results" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/serosippr/serosippr.py#L30-L65
OLC-Bioinformatics/sipprverse
serosippr/serosippr.py
SeroSippr.serotype_escherichia
def serotype_escherichia(self):
    """
    Create attributes storing the best results for the O and H types
    """
    for sample in self.runmetadata.samples:
        # Initialise negative results to be overwritten when necessary
        sample[self.analysistype].best_o_pid = '-'
        sample[self.analysistype].o_genes = ['-']
        sample[self.analysistype].o_set = ['-']
        sample[self.analysistype].best_h_pid = '-'
        sample[self.analysistype].h_genes = ['-']
        sample[self.analysistype].h_set = ['-']
        if sample.general.bestassemblyfile != 'NA':
            # Serotyping only applies to Escherichia samples.
            if sample.general.closestrefseqgenus == 'Escherichia':
                # Bucket each hit by whether its gene-name suffix
                # contains 'O' or 'H' (a hit can land in both buckets).
                o = dict()
                h = dict()
                for result, percentid in sample[self.analysistype].results.items():
                    if 'O' in result.split('_')[-1]:
                        o.update({result: float(percentid)})
                    if 'H' in result.split('_')[-1]:
                        h.update({result: float(percentid)})
                # O
                try:
                    # Best O hit: highest percent identity; keep every
                    # gene tied at that identity, then de-duplicate the
                    # serotype suffixes.
                    sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True)
                    sample[self.analysistype].best_o_pid = str(sorted_o[0][1])
                    sample[self.analysistype].o_genes = [gene for gene, pid in o.items()
                                                         if str(pid) == sample[self.analysistype].best_o_pid]
                    sample[self.analysistype].o_set = \
                        list(set(gene.split('_')[-1] for gene in sample[self.analysistype].o_genes))
                except (KeyError, IndexError):
                    # No O hits: keep the '-' defaults set above.
                    pass
                # H
                try:
                    # Same selection logic for the H antigen.
                    sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True)
                    sample[self.analysistype].best_h_pid = str(sorted_h[0][1])
                    sample[self.analysistype].h_genes = [gene for gene, pid in h.items()
                                                         if str(pid) == sample[self.analysistype].best_h_pid]
                    sample[self.analysistype].h_set = \
                        list(set(gene.split('_')[-1] for gene in sample[self.analysistype].h_genes))
                except (KeyError, IndexError):
                    # No H hits: keep the '-' defaults set above.
                    pass
python
def serotype_escherichia(self): """ Create attributes storing the best results for the O and H types """ for sample in self.runmetadata.samples: # Initialise negative results to be overwritten when necessary sample[self.analysistype].best_o_pid = '-' sample[self.analysistype].o_genes = ['-'] sample[self.analysistype].o_set = ['-'] sample[self.analysistype].best_h_pid = '-' sample[self.analysistype].h_genes = ['-'] sample[self.analysistype].h_set = ['-'] if sample.general.bestassemblyfile != 'NA': if sample.general.closestrefseqgenus == 'Escherichia': o = dict() h = dict() for result, percentid in sample[self.analysistype].results.items(): if 'O' in result.split('_')[-1]: o.update({result: float(percentid)}) if 'H' in result.split('_')[-1]: h.update({result: float(percentid)}) # O try: sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True) sample[self.analysistype].best_o_pid = str(sorted_o[0][1]) sample[self.analysistype].o_genes = [gene for gene, pid in o.items() if str(pid) == sample[self.analysistype].best_o_pid] sample[self.analysistype].o_set = \ list(set(gene.split('_')[-1] for gene in sample[self.analysistype].o_genes)) except (KeyError, IndexError): pass # H try: sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True) sample[self.analysistype].best_h_pid = str(sorted_h[0][1]) sample[self.analysistype].h_genes = [gene for gene, pid in h.items() if str(pid) == sample[self.analysistype].best_h_pid] sample[self.analysistype].h_set = \ list(set(gene.split('_')[-1] for gene in sample[self.analysistype].h_genes)) except (KeyError, IndexError): pass
[ "def", "serotype_escherichia", "(", "self", ")", ":", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "# Initialise negative results to be overwritten when necessary", "sample", "[", "self", ".", "analysistype", "]", ".", "best_o_pid", "=", "'-'", "sample", "[", "self", ".", "analysistype", "]", ".", "o_genes", "=", "[", "'-'", "]", "sample", "[", "self", ".", "analysistype", "]", ".", "o_set", "=", "[", "'-'", "]", "sample", "[", "self", ".", "analysistype", "]", ".", "best_h_pid", "=", "'-'", "sample", "[", "self", ".", "analysistype", "]", ".", "h_genes", "=", "[", "'-'", "]", "sample", "[", "self", ".", "analysistype", "]", ".", "h_set", "=", "[", "'-'", "]", "if", "sample", ".", "general", ".", "bestassemblyfile", "!=", "'NA'", ":", "if", "sample", ".", "general", ".", "closestrefseqgenus", "==", "'Escherichia'", ":", "o", "=", "dict", "(", ")", "h", "=", "dict", "(", ")", "for", "result", ",", "percentid", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "results", ".", "items", "(", ")", ":", "if", "'O'", "in", "result", ".", "split", "(", "'_'", ")", "[", "-", "1", "]", ":", "o", ".", "update", "(", "{", "result", ":", "float", "(", "percentid", ")", "}", ")", "if", "'H'", "in", "result", ".", "split", "(", "'_'", ")", "[", "-", "1", "]", ":", "h", ".", "update", "(", "{", "result", ":", "float", "(", "percentid", ")", "}", ")", "# O", "try", ":", "sorted_o", "=", "sorted", "(", "o", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "best_o_pid", "=", "str", "(", "sorted_o", "[", "0", "]", "[", "1", "]", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "o_genes", "=", "[", "gene", "for", "gene", ",", "pid", "in", "o", ".", "items", "(", ")", "if", "str", "(", "pid", ")", "==", "sample", "[", "self", ".", "analysistype", "]", ".", "best_o_pid", "]", "sample", "[", "self", ".", 
"analysistype", "]", ".", "o_set", "=", "list", "(", "set", "(", "gene", ".", "split", "(", "'_'", ")", "[", "-", "1", "]", "for", "gene", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "o_genes", ")", ")", "except", "(", "KeyError", ",", "IndexError", ")", ":", "pass", "# H", "try", ":", "sorted_h", "=", "sorted", "(", "h", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "best_h_pid", "=", "str", "(", "sorted_h", "[", "0", "]", "[", "1", "]", ")", "sample", "[", "self", ".", "analysistype", "]", ".", "h_genes", "=", "[", "gene", "for", "gene", ",", "pid", "in", "h", ".", "items", "(", ")", "if", "str", "(", "pid", ")", "==", "sample", "[", "self", ".", "analysistype", "]", ".", "best_h_pid", "]", "sample", "[", "self", ".", "analysistype", "]", ".", "h_set", "=", "list", "(", "set", "(", "gene", ".", "split", "(", "'_'", ")", "[", "-", "1", "]", "for", "gene", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "h_genes", ")", ")", "except", "(", "KeyError", ",", "IndexError", ")", ":", "pass" ]
Create attributes storing the best results for the O and H types
[ "Create", "attributes", "storing", "the", "best", "results", "for", "the", "O", "and", "H", "types" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/serosippr/serosippr.py#L67-L108
tsnaomi/finnsyll
finnsyll/prev/v02.py
_syllabify
def _syllabify(word):
    '''Syllabify the given word.'''
    # Work in the umlaut-replaced representation; T1 also reports
    # whether any VV / VVV sequences remain to be handled.
    word = replace_umlauts(word)
    word, CONTINUE_VV, CONTINUE_VVV, applied_rules = apply_T1(word)

    if CONTINUE_VV:
        for rule in (apply_T2, apply_T4):
            word, tag = rule(word)
            applied_rules += tag

    if CONTINUE_VVV:
        for rule in (apply_T5, apply_T6, apply_T7):
            word, tag = rule(word)
            applied_rules += tag

    # Restore the umlauts before returning.
    return replace_umlauts(word, put_back=True), applied_rules
python
def _syllabify(word): '''Syllabify the given word.''' word = replace_umlauts(word) word, CONTINUE_VV, CONTINUE_VVV, applied_rules = apply_T1(word) if CONTINUE_VV: word, T2 = apply_T2(word) word, T4 = apply_T4(word) applied_rules += T2 + T4 if CONTINUE_VVV: word, T5 = apply_T5(word) word, T6 = apply_T6(word) word, T7 = apply_T7(word) applied_rules += T5 + T6 + T7 word = replace_umlauts(word, put_back=True) return word, applied_rules
[ "def", "_syllabify", "(", "word", ")", ":", "word", "=", "replace_umlauts", "(", "word", ")", "word", ",", "CONTINUE_VV", ",", "CONTINUE_VVV", ",", "applied_rules", "=", "apply_T1", "(", "word", ")", "if", "CONTINUE_VV", ":", "word", ",", "T2", "=", "apply_T2", "(", "word", ")", "word", ",", "T4", "=", "apply_T4", "(", "word", ")", "applied_rules", "+=", "T2", "+", "T4", "if", "CONTINUE_VVV", ":", "word", ",", "T5", "=", "apply_T5", "(", "word", ")", "word", ",", "T6", "=", "apply_T6", "(", "word", ")", "word", ",", "T7", "=", "apply_T7", "(", "word", ")", "applied_rules", "+=", "T5", "+", "T6", "+", "T7", "word", "=", "replace_umlauts", "(", "word", ",", "put_back", "=", "True", ")", "return", "word", ",", "applied_rules" ]
Syllabify the given word.
[ "Syllabify", "the", "given", "word", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v02.py#L36-L54
tsnaomi/finnsyll
finnsyll/prev/v02.py
apply_T1
def apply_T1(word): '''There is a syllable boundary in front of every CV-sequence.''' T1 = ' T1' WORD = _split_consonants_and_vowels(word) CONTINUE_VV = 0 CONTINUE_VVV = 0 for i, v in enumerate(WORD): if i == 0 and is_consonant(v[0][0]): continue elif is_consonant(v[0]) and i + 1 != len(WORD): WORD[i] = v[:-1] + '.' + v[-1] elif is_vowel(v[0]): if len(v) > 2: CONTINUE_VVV += 1 elif len(v) > 1: CONTINUE_VV += 1 word = ''.join(WORD) return word, CONTINUE_VV, CONTINUE_VVV, T1
python
def apply_T1(word): '''There is a syllable boundary in front of every CV-sequence.''' T1 = ' T1' WORD = _split_consonants_and_vowels(word) CONTINUE_VV = 0 CONTINUE_VVV = 0 for i, v in enumerate(WORD): if i == 0 and is_consonant(v[0][0]): continue elif is_consonant(v[0]) and i + 1 != len(WORD): WORD[i] = v[:-1] + '.' + v[-1] elif is_vowel(v[0]): if len(v) > 2: CONTINUE_VVV += 1 elif len(v) > 1: CONTINUE_VV += 1 word = ''.join(WORD) return word, CONTINUE_VV, CONTINUE_VVV, T1
[ "def", "apply_T1", "(", "word", ")", ":", "T1", "=", "' T1'", "WORD", "=", "_split_consonants_and_vowels", "(", "word", ")", "CONTINUE_VV", "=", "0", "CONTINUE_VVV", "=", "0", "for", "i", ",", "v", "in", "enumerate", "(", "WORD", ")", ":", "if", "i", "==", "0", "and", "is_consonant", "(", "v", "[", "0", "]", "[", "0", "]", ")", ":", "continue", "elif", "is_consonant", "(", "v", "[", "0", "]", ")", "and", "i", "+", "1", "!=", "len", "(", "WORD", ")", ":", "WORD", "[", "i", "]", "=", "v", "[", ":", "-", "1", "]", "+", "'.'", "+", "v", "[", "-", "1", "]", "elif", "is_vowel", "(", "v", "[", "0", "]", ")", ":", "if", "len", "(", "v", ")", ">", "2", ":", "CONTINUE_VVV", "+=", "1", "elif", "len", "(", "v", ")", ">", "1", ":", "CONTINUE_VV", "+=", "1", "word", "=", "''", ".", "join", "(", "WORD", ")", "return", "word", ",", "CONTINUE_VV", ",", "CONTINUE_VVV", ",", "T1" ]
There is a syllable boundary in front of every CV-sequence.
[ "There", "is", "a", "syllable", "boundary", "in", "front", "of", "every", "CV", "-", "sequence", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v02.py#L59-L84
tsnaomi/finnsyll
finnsyll/prev/v02.py
apply_T2
def apply_T2(word): '''There is a syllable boundary within a sequence VV of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].''' T2 = '' WORD = word.split('.') for i, v in enumerate(WORD): if not contains_diphthong(v): VV = contains_VV(v) if VV: I = v.find(VV) + 1 WORD[i] = v[:I] + '.' + v[I:] T2 = ' T2' word = '.'.join(WORD) return word, T2
python
def apply_T2(word): '''There is a syllable boundary within a sequence VV of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].''' T2 = '' WORD = word.split('.') for i, v in enumerate(WORD): if not contains_diphthong(v): VV = contains_VV(v) if VV: I = v.find(VV) + 1 WORD[i] = v[:I] + '.' + v[I:] T2 = ' T2' word = '.'.join(WORD) return word, T2
[ "def", "apply_T2", "(", "word", ")", ":", "T2", "=", "''", "WORD", "=", "word", ".", "split", "(", "'.'", ")", "for", "i", ",", "v", "in", "enumerate", "(", "WORD", ")", ":", "if", "not", "contains_diphthong", "(", "v", ")", ":", "VV", "=", "contains_VV", "(", "v", ")", "if", "VV", ":", "I", "=", "v", ".", "find", "(", "VV", ")", "+", "1", "WORD", "[", "i", "]", "=", "v", "[", ":", "I", "]", "+", "'.'", "+", "v", "[", "I", ":", "]", "T2", "=", "' T2'", "word", "=", "'.'", ".", "join", "(", "WORD", ")", "return", "word", ",", "T2" ]
There is a syllable boundary within a sequence VV of two nonidentical vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
[ "There", "is", "a", "syllable", "boundary", "within", "a", "sequence", "VV", "of", "two", "nonidentical", "vowels", "that", "are", "not", "a", "genuine", "diphthong", "e", ".", "g", ".", "[", "ta", ".", "e", "]", "[", "ko", ".", "et", ".", "taa", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v02.py#L118-L136
tsnaomi/finnsyll
finnsyll/prev/v02.py
apply_T4
def apply_T4(word): '''An agglutination diphthong that ends in /u, y/ usually contains a syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us], [va.ka.ut.taa].''' T4 = '' WORD = word.split('.') for i, v in enumerate(WORD): # i % 2 != 0 prevents this rule from applying to first, third, etc. # syllables, which receive stress (WSP) if is_consonant(v[-1]) and i % 2 != 0: if i + 1 == len(WORD) or is_consonant(WORD[i + 1][0]): if contains_Vu_diphthong(v): I = v.rfind('u') WORD[i] = v[:I] + '.' + v[I:] T4 = ' T4' elif contains_Vy_diphthong(v): I = v.rfind('y') WORD[i] = v[:I] + '.' + v[I:] T4 = ' T4' word = '.'.join(WORD) return word, T4
python
def apply_T4(word): '''An agglutination diphthong that ends in /u, y/ usually contains a syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us], [va.ka.ut.taa].''' T4 = '' WORD = word.split('.') for i, v in enumerate(WORD): # i % 2 != 0 prevents this rule from applying to first, third, etc. # syllables, which receive stress (WSP) if is_consonant(v[-1]) and i % 2 != 0: if i + 1 == len(WORD) or is_consonant(WORD[i + 1][0]): if contains_Vu_diphthong(v): I = v.rfind('u') WORD[i] = v[:I] + '.' + v[I:] T4 = ' T4' elif contains_Vy_diphthong(v): I = v.rfind('y') WORD[i] = v[:I] + '.' + v[I:] T4 = ' T4' word = '.'.join(WORD) return word, T4
[ "def", "apply_T4", "(", "word", ")", ":", "T4", "=", "''", "WORD", "=", "word", ".", "split", "(", "'.'", ")", "for", "i", ",", "v", "in", "enumerate", "(", "WORD", ")", ":", "# i % 2 != 0 prevents this rule from applying to first, third, etc.", "# syllables, which receive stress (WSP)", "if", "is_consonant", "(", "v", "[", "-", "1", "]", ")", "and", "i", "%", "2", "!=", "0", ":", "if", "i", "+", "1", "==", "len", "(", "WORD", ")", "or", "is_consonant", "(", "WORD", "[", "i", "+", "1", "]", "[", "0", "]", ")", ":", "if", "contains_Vu_diphthong", "(", "v", ")", ":", "I", "=", "v", ".", "rfind", "(", "'u'", ")", "WORD", "[", "i", "]", "=", "v", "[", ":", "I", "]", "+", "'.'", "+", "v", "[", "I", ":", "]", "T4", "=", "' T4'", "elif", "contains_Vy_diphthong", "(", "v", ")", ":", "I", "=", "v", ".", "rfind", "(", "'y'", ")", "WORD", "[", "i", "]", "=", "v", "[", ":", "I", "]", "+", "'.'", "+", "v", "[", "I", ":", "]", "T4", "=", "' T4'", "word", "=", "'.'", ".", "join", "(", "WORD", ")", "return", "word", ",", "T4" ]
An agglutination diphthong that ends in /u, y/ usually contains a syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us], [va.ka.ut.taa].
[ "An", "agglutination", "diphthong", "that", "ends", "in", "/", "u", "y", "/", "usually", "contains", "a", "syllable", "boundary", "when", "-", "C#", "or", "-", "CCV", "follow", "e", ".", "g", ".", "[", "lau", ".", "ka", ".", "us", "]", "[", "va", ".", "ka", ".", "ut", ".", "taa", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v02.py#L141-L168
tsnaomi/finnsyll
finnsyll/prev/v02.py
apply_T5
def apply_T5(word): # BROKEN '''If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final diphthong, there is a syllable boundary between it and the third vowel, e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an], [säi.e], [oi.om.me].''' T5 = '' WORD = word.split('.') for i, v in enumerate(WORD): if contains_VVV(v) and any(i for i in i_DIPHTHONGS if i in v): I = v.rfind('i') - 1 or 2 I = I + 2 if is_consonant(v[I - 1]) else I WORD[i] = v[:I] + '.' + v[I:] T5 = ' T5' word = '.'.join(WORD) return word, T5
python
def apply_T5(word): # BROKEN '''If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final diphthong, there is a syllable boundary between it and the third vowel, e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an], [säi.e], [oi.om.me].''' T5 = '' WORD = word.split('.') for i, v in enumerate(WORD): if contains_VVV(v) and any(i for i in i_DIPHTHONGS if i in v): I = v.rfind('i') - 1 or 2 I = I + 2 if is_consonant(v[I - 1]) else I WORD[i] = v[:I] + '.' + v[I:] T5 = ' T5' word = '.'.join(WORD) return word, T5
[ "def", "apply_T5", "(", "word", ")", ":", "# BROKEN", "T5", "=", "''", "WORD", "=", "word", ".", "split", "(", "'.'", ")", "for", "i", ",", "v", "in", "enumerate", "(", "WORD", ")", ":", "if", "contains_VVV", "(", "v", ")", "and", "any", "(", "i", "for", "i", "in", "i_DIPHTHONGS", "if", "i", "in", "v", ")", ":", "I", "=", "v", ".", "rfind", "(", "'i'", ")", "-", "1", "or", "2", "I", "=", "I", "+", "2", "if", "is_consonant", "(", "v", "[", "I", "-", "1", "]", ")", "else", "I", "WORD", "[", "i", "]", "=", "v", "[", ":", "I", "]", "+", "'.'", "+", "v", "[", "I", ":", "]", "T5", "=", "' T5'", "word", "=", "'.'", ".", "join", "(", "WORD", ")", "return", "word", ",", "T5" ]
If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final diphthong, there is a syllable boundary between it and the third vowel, e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an], [säi.e], [oi.om.me].
[ "If", "a", "(", "V", ")", "VVV", "-", "sequence", "contains", "a", "VV", "-", "sequence", "that", "could", "be", "an", "/", "i", "/", "-", "final", "diphthong", "there", "is", "a", "syllable", "boundary", "between", "it", "and", "the", "third", "vowel", "e", ".", "g", ".", "[", "raa", ".", "ois", ".", "sa", "]", "[", "huo", ".", "uim", ".", "me", "]", "[", "la", ".", "eis", ".", "sa", "]", "[", "sel", ".", "vi", ".", "äi", ".", "si", "]", "[", "tai", ".", "an", "]", "[", "säi", ".", "e", "]", "[", "oi", ".", "om", ".", "me", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v02.py#L176-L193
tsnaomi/finnsyll
finnsyll/prev/v02.py
apply_T6
def apply_T6(word): '''If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].''' T6 = '' WORD = word.split('.') for i, v in enumerate(WORD): if contains_VVV(v): VV = [v.find(j) for j in LONG_VOWELS if v.find(j) > 0] if VV: I = VV[0] T6 = ' T6' if I + 2 == len(v) or is_vowel(v[I + 2]): WORD[i] = v[:I + 2] + '.' + v[I + 2:] # TODO else: WORD[i] = v[:I] + '.' + v[I:] word = '.'.join(WORD) word = word.strip('.') # TODO return word, T6
python
def apply_T6(word): '''If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].''' T6 = '' WORD = word.split('.') for i, v in enumerate(WORD): if contains_VVV(v): VV = [v.find(j) for j in LONG_VOWELS if v.find(j) > 0] if VV: I = VV[0] T6 = ' T6' if I + 2 == len(v) or is_vowel(v[I + 2]): WORD[i] = v[:I + 2] + '.' + v[I + 2:] # TODO else: WORD[i] = v[:I] + '.' + v[I:] word = '.'.join(WORD) word = word.strip('.') # TODO return word, T6
[ "def", "apply_T6", "(", "word", ")", ":", "T6", "=", "''", "WORD", "=", "word", ".", "split", "(", "'.'", ")", "for", "i", ",", "v", "in", "enumerate", "(", "WORD", ")", ":", "if", "contains_VVV", "(", "v", ")", ":", "VV", "=", "[", "v", ".", "find", "(", "j", ")", "for", "j", "in", "LONG_VOWELS", "if", "v", ".", "find", "(", "j", ")", ">", "0", "]", "if", "VV", ":", "I", "=", "VV", "[", "0", "]", "T6", "=", "' T6'", "if", "I", "+", "2", "==", "len", "(", "v", ")", "or", "is_vowel", "(", "v", "[", "I", "+", "2", "]", ")", ":", "WORD", "[", "i", "]", "=", "v", "[", ":", "I", "+", "2", "]", "+", "'.'", "+", "v", "[", "I", "+", "2", ":", "]", "# TODO", "else", ":", "WORD", "[", "i", "]", "=", "v", "[", ":", "I", "]", "+", "'.'", "+", "v", "[", "I", ":", "]", "word", "=", "'.'", ".", "join", "(", "WORD", ")", "word", "=", "word", ".", "strip", "(", "'.'", ")", "# TODO", "return", "word", ",", "T6" ]
If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].
[ "If", "a", "VVV", "-", "sequence", "contains", "a", "long", "vowel", "there", "is", "a", "syllable", "boundary", "between", "it", "and", "the", "third", "vowel", "e", ".", "g", ".", "[", "kor", ".", "ke", ".", "aa", "]", "[", "yh", ".", "ti", ".", "öön", "]", "[", "ruu", ".", "an", "]", "[", "mää", ".", "yt", ".", "te", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v02.py#L201-L226
tsnaomi/finnsyll
finnsyll/prev/v02.py
apply_T7
def apply_T7(word): '''If a VVV-sequence does not contain a potential /i/-final diphthong, there is a syllable boundary between the second and third vowels, e.g. [kau.an], [leu.an], [kiu.as].''' T7 = '' WORD = word.split('.') for i, v in enumerate(WORD): if contains_VVV(v): for I, V in enumerate(v[::-1]): if is_vowel(V): WORD[i] = v[:I] + '.' + v[I:] T7 = ' T7' word = '.'.join(WORD) return word, T7
python
def apply_T7(word): '''If a VVV-sequence does not contain a potential /i/-final diphthong, there is a syllable boundary between the second and third vowels, e.g. [kau.an], [leu.an], [kiu.as].''' T7 = '' WORD = word.split('.') for i, v in enumerate(WORD): if contains_VVV(v): for I, V in enumerate(v[::-1]): if is_vowel(V): WORD[i] = v[:I] + '.' + v[I:] T7 = ' T7' word = '.'.join(WORD) return word, T7
[ "def", "apply_T7", "(", "word", ")", ":", "T7", "=", "''", "WORD", "=", "word", ".", "split", "(", "'.'", ")", "for", "i", ",", "v", "in", "enumerate", "(", "WORD", ")", ":", "if", "contains_VVV", "(", "v", ")", ":", "for", "I", ",", "V", "in", "enumerate", "(", "v", "[", ":", ":", "-", "1", "]", ")", ":", "if", "is_vowel", "(", "V", ")", ":", "WORD", "[", "i", "]", "=", "v", "[", ":", "I", "]", "+", "'.'", "+", "v", "[", "I", ":", "]", "T7", "=", "' T7'", "word", "=", "'.'", ".", "join", "(", "WORD", ")", "return", "word", ",", "T7" ]
If a VVV-sequence does not contain a potential /i/-final diphthong, there is a syllable boundary between the second and third vowels, e.g. [kau.an], [leu.an], [kiu.as].
[ "If", "a", "VVV", "-", "sequence", "does", "not", "contain", "a", "potential", "/", "i", "/", "-", "final", "diphthong", "there", "is", "a", "syllable", "boundary", "between", "the", "second", "and", "third", "vowels", "e", ".", "g", ".", "[", "kau", ".", "an", "]", "[", "leu", ".", "an", "]", "[", "kiu", ".", "as", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v02.py#L231-L250
obulpathi/cdn-fastly-python
bin/fastly_upload_vcl.py
main
def main(): """ Upload a vcl file to a fastly service, cloning the current version if necessary. The uploaded vcl is set as main unless --include is given. All existing vcl files will be deleted first if --delete is given. """ parser = OptionParser(description= "Upload a vcl file (set as main) to a given fastly service. All arguments are required.") parser.add_option("-k", "--key", dest="apikey", help="fastly api key") parser.add_option("-u", "--user", dest="user", help="fastly user name") parser.add_option("-p", "--password", dest="password", help="fastly password") parser.add_option("-f", "--file", dest="filename", help="vcl file to upload") parser.add_option("-s", "--service", dest="service_name", help="service to update") parser.add_option("-d", "--delete_vcl", action="store_true", dest="delete_vcl", default=False, help="delete existing vcl files from service\ before uploading") parser.add_option("-i", "--include", action="store_true", dest="include_vcl", default=False, help="do not set uploaded vcl as main,\ to be included only") (options, args) = parser.parse_args() for val in options.__dict__.values(): if val is None: print "Missing required options:" parser.print_help() sys.exit(1) vcl_name = options.filename.split('/').pop() service_name = options.service_name vcl_file = open(options.filename, 'r') vcl_content = vcl_file.read() # Need to fully authenticate to access all features. 
client = fastly.connect(options.apikey) client.login(options.user, options.password) service = client.get_service_by_name(service_name) versions = client.list_versions(service.id) latest = versions.pop() if latest.locked is True or latest.active is True: print "\n[ Cloning version %d ]\n"\ % (latest.number) latest = client.clone_version(service.id, latest.number) if options.delete_vcl: vcls = client.list_vcls(service.id, latest.number) for vcl in vcls: print "\n[ Deleting vcl file %s from version %d ]\n" %\ (service_name, latest.number) client.delete_vcl(service.id, latest.number, vcl.name) if vcl_name in latest.vcls: print "\n[ Updating vcl file %s on service %s version %d ]\n"\ % (vcl_name, service_name, latest.number) client.update_vcl(service.id, latest.number, vcl_name, content=vcl_content) else: print "\n[ Uploading new vcl file %s on service %s version %d ]\n"\ % (vcl_name, service_name, latest.number) client.upload_vcl(service.id, latest.number, vcl_name, vcl_content) if options.include_vcl is False: print "\n[ Setting vcl %s as main ]\n" % (vcl_name) client.set_main_vcl(service.id, latest.number, vcl_name) client.activate_version(service.id, latest.number) print "\n[ Activing configuration version %d ]\n" % (latest.number)
python
def main(): """ Upload a vcl file to a fastly service, cloning the current version if necessary. The uploaded vcl is set as main unless --include is given. All existing vcl files will be deleted first if --delete is given. """ parser = OptionParser(description= "Upload a vcl file (set as main) to a given fastly service. All arguments are required.") parser.add_option("-k", "--key", dest="apikey", help="fastly api key") parser.add_option("-u", "--user", dest="user", help="fastly user name") parser.add_option("-p", "--password", dest="password", help="fastly password") parser.add_option("-f", "--file", dest="filename", help="vcl file to upload") parser.add_option("-s", "--service", dest="service_name", help="service to update") parser.add_option("-d", "--delete_vcl", action="store_true", dest="delete_vcl", default=False, help="delete existing vcl files from service\ before uploading") parser.add_option("-i", "--include", action="store_true", dest="include_vcl", default=False, help="do not set uploaded vcl as main,\ to be included only") (options, args) = parser.parse_args() for val in options.__dict__.values(): if val is None: print "Missing required options:" parser.print_help() sys.exit(1) vcl_name = options.filename.split('/').pop() service_name = options.service_name vcl_file = open(options.filename, 'r') vcl_content = vcl_file.read() # Need to fully authenticate to access all features. 
client = fastly.connect(options.apikey) client.login(options.user, options.password) service = client.get_service_by_name(service_name) versions = client.list_versions(service.id) latest = versions.pop() if latest.locked is True or latest.active is True: print "\n[ Cloning version %d ]\n"\ % (latest.number) latest = client.clone_version(service.id, latest.number) if options.delete_vcl: vcls = client.list_vcls(service.id, latest.number) for vcl in vcls: print "\n[ Deleting vcl file %s from version %d ]\n" %\ (service_name, latest.number) client.delete_vcl(service.id, latest.number, vcl.name) if vcl_name in latest.vcls: print "\n[ Updating vcl file %s on service %s version %d ]\n"\ % (vcl_name, service_name, latest.number) client.update_vcl(service.id, latest.number, vcl_name, content=vcl_content) else: print "\n[ Uploading new vcl file %s on service %s version %d ]\n"\ % (vcl_name, service_name, latest.number) client.upload_vcl(service.id, latest.number, vcl_name, vcl_content) if options.include_vcl is False: print "\n[ Setting vcl %s as main ]\n" % (vcl_name) client.set_main_vcl(service.id, latest.number, vcl_name) client.activate_version(service.id, latest.number) print "\n[ Activing configuration version %d ]\n" % (latest.number)
[ "def", "main", "(", ")", ":", "parser", "=", "OptionParser", "(", "description", "=", "\"Upload a vcl file (set as main) to a given fastly service. All arguments are required.\"", ")", "parser", ".", "add_option", "(", "\"-k\"", ",", "\"--key\"", ",", "dest", "=", "\"apikey\"", ",", "help", "=", "\"fastly api key\"", ")", "parser", ".", "add_option", "(", "\"-u\"", ",", "\"--user\"", ",", "dest", "=", "\"user\"", ",", "help", "=", "\"fastly user name\"", ")", "parser", ".", "add_option", "(", "\"-p\"", ",", "\"--password\"", ",", "dest", "=", "\"password\"", ",", "help", "=", "\"fastly password\"", ")", "parser", ".", "add_option", "(", "\"-f\"", ",", "\"--file\"", ",", "dest", "=", "\"filename\"", ",", "help", "=", "\"vcl file to upload\"", ")", "parser", ".", "add_option", "(", "\"-s\"", ",", "\"--service\"", ",", "dest", "=", "\"service_name\"", ",", "help", "=", "\"service to update\"", ")", "parser", ".", "add_option", "(", "\"-d\"", ",", "\"--delete_vcl\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"delete_vcl\"", ",", "default", "=", "False", ",", "help", "=", "\"delete existing vcl files from service\\\n before uploading\"", ")", "parser", ".", "add_option", "(", "\"-i\"", ",", "\"--include\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"include_vcl\"", ",", "default", "=", "False", ",", "help", "=", "\"do not set uploaded vcl as main,\\\n to be included only\"", ")", "(", "options", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "for", "val", "in", "options", ".", "__dict__", ".", "values", "(", ")", ":", "if", "val", "is", "None", ":", "print", "\"Missing required options:\"", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "vcl_name", "=", "options", ".", "filename", ".", "split", "(", "'/'", ")", ".", "pop", "(", ")", "service_name", "=", "options", ".", "service_name", "vcl_file", "=", "open", "(", "options", ".", "filename", ",", "'r'", ")", "vcl_content", "=", "vcl_file", ".", 
"read", "(", ")", "# Need to fully authenticate to access all features.", "client", "=", "fastly", ".", "connect", "(", "options", ".", "apikey", ")", "client", ".", "login", "(", "options", ".", "user", ",", "options", ".", "password", ")", "service", "=", "client", ".", "get_service_by_name", "(", "service_name", ")", "versions", "=", "client", ".", "list_versions", "(", "service", ".", "id", ")", "latest", "=", "versions", ".", "pop", "(", ")", "if", "latest", ".", "locked", "is", "True", "or", "latest", ".", "active", "is", "True", ":", "print", "\"\\n[ Cloning version %d ]\\n\"", "%", "(", "latest", ".", "number", ")", "latest", "=", "client", ".", "clone_version", "(", "service", ".", "id", ",", "latest", ".", "number", ")", "if", "options", ".", "delete_vcl", ":", "vcls", "=", "client", ".", "list_vcls", "(", "service", ".", "id", ",", "latest", ".", "number", ")", "for", "vcl", "in", "vcls", ":", "print", "\"\\n[ Deleting vcl file %s from version %d ]\\n\"", "%", "(", "service_name", ",", "latest", ".", "number", ")", "client", ".", "delete_vcl", "(", "service", ".", "id", ",", "latest", ".", "number", ",", "vcl", ".", "name", ")", "if", "vcl_name", "in", "latest", ".", "vcls", ":", "print", "\"\\n[ Updating vcl file %s on service %s version %d ]\\n\"", "%", "(", "vcl_name", ",", "service_name", ",", "latest", ".", "number", ")", "client", ".", "update_vcl", "(", "service", ".", "id", ",", "latest", ".", "number", ",", "vcl_name", ",", "content", "=", "vcl_content", ")", "else", ":", "print", "\"\\n[ Uploading new vcl file %s on service %s version %d ]\\n\"", "%", "(", "vcl_name", ",", "service_name", ",", "latest", ".", "number", ")", "client", ".", "upload_vcl", "(", "service", ".", "id", ",", "latest", ".", "number", ",", "vcl_name", ",", "vcl_content", ")", "if", "options", ".", "include_vcl", "is", "False", ":", "print", "\"\\n[ Setting vcl %s as main ]\\n\"", "%", "(", "vcl_name", ")", "client", ".", "set_main_vcl", "(", "service", ".", "id", ",", 
"latest", ".", "number", ",", "vcl_name", ")", "client", ".", "activate_version", "(", "service", ".", "id", ",", "latest", ".", "number", ")", "print", "\"\\n[ Activing configuration version %d ]\\n\"", "%", "(", "latest", ".", "number", ")" ]
Upload a vcl file to a fastly service, cloning the current version if necessary. The uploaded vcl is set as main unless --include is given. All existing vcl files will be deleted first if --delete is given.
[ "Upload", "a", "vcl", "file", "to", "a", "fastly", "service", "cloning", "the", "current", "version", "if", "necessary", ".", "The", "uploaded", "vcl", "is", "set", "as", "main", "unless", "--", "include", "is", "given", ".", "All", "existing", "vcl", "files", "will", "be", "deleted", "first", "if", "--", "delete", "is", "given", "." ]
train
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/bin/fastly_upload_vcl.py#L34-L111
OLC-Bioinformatics/sipprverse
customsippr/customsippr.py
CustomGenes.main
def main(self): """ Run the necessary methods in the correct order """ self.target_validate() self.gene_names() Sippr(inputobject=self, k=self.kmer_size, allow_soft_clips=self.allow_soft_clips) self.report()
python
def main(self): """ Run the necessary methods in the correct order """ self.target_validate() self.gene_names() Sippr(inputobject=self, k=self.kmer_size, allow_soft_clips=self.allow_soft_clips) self.report()
[ "def", "main", "(", "self", ")", ":", "self", ".", "target_validate", "(", ")", "self", ".", "gene_names", "(", ")", "Sippr", "(", "inputobject", "=", "self", ",", "k", "=", "self", ".", "kmer_size", ",", "allow_soft_clips", "=", "self", ".", "allow_soft_clips", ")", "self", ".", "report", "(", ")" ]
Run the necessary methods in the correct order
[ "Run", "the", "necessary", "methods", "in", "the", "correct", "order" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/customsippr/customsippr.py#L13-L22
OLC-Bioinformatics/sipprverse
customsippr/customsippr.py
CustomGenes.gene_names
def gene_names(self): """ Extract the names of the user-supplied targets """ # Iterate through all the target names in the formatted targets file for record in SeqIO.parse(self.targets, 'fasta'): # Append all the gene names to the list of names self.genes.append(record.id)
python
def gene_names(self): """ Extract the names of the user-supplied targets """ # Iterate through all the target names in the formatted targets file for record in SeqIO.parse(self.targets, 'fasta'): # Append all the gene names to the list of names self.genes.append(record.id)
[ "def", "gene_names", "(", "self", ")", ":", "# Iterate through all the target names in the formatted targets file", "for", "record", "in", "SeqIO", ".", "parse", "(", "self", ".", "targets", ",", "'fasta'", ")", ":", "# Append all the gene names to the list of names", "self", ".", "genes", ".", "append", "(", "record", ".", "id", ")" ]
Extract the names of the user-supplied targets
[ "Extract", "the", "names", "of", "the", "user", "-", "supplied", "targets" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/customsippr/customsippr.py#L32-L39
OLC-Bioinformatics/sipprverse
customsippr/customsippr.py
CustomGenes.report
def report(self): """ Create the report for the user-supplied targets """ # Add all the genes to the header header = 'Sample,' data = str() with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') as report: write_header = True for sample in self.runmetadata: data += sample.name + ',' # Iterate through all the user-supplied target names for target in sorted(self.genes): write_results = False # There was an issue with 'target' not matching 'name' due to a dash being replaced by an underscore # only in 'name'. This will hopefully address this issue target = target.replace('-', '_') if write_header: header += '{target}_match_details,{target},'.format(target=target) for name, identity in sample[self.analysistype].results.items(): # Ensure that all dashes are replaced with underscores name = name.replace('-', '_') # If the current target matches the target in the header, add the data to the string if name == target: write_results = True gene_results = '{percent_id}% ({avgdepth} +/- {stddev}),{record},'\ .format(percent_id=identity, avgdepth=sample[self.analysistype].avgdepth[name], stddev=sample[self.analysistype].standarddev[name], record=sample[self.analysistype].sequences[target]) # Populate the data string appropriately data += gene_results # If the target is not present, write dashes to represent the results and sequence if not write_results: data += '-,-,' data += ' \n' write_header = False header += '\n' # Write the strings to the report report.write(header) report.write(data)
python
def report(self): """ Create the report for the user-supplied targets """ # Add all the genes to the header header = 'Sample,' data = str() with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') as report: write_header = True for sample in self.runmetadata: data += sample.name + ',' # Iterate through all the user-supplied target names for target in sorted(self.genes): write_results = False # There was an issue with 'target' not matching 'name' due to a dash being replaced by an underscore # only in 'name'. This will hopefully address this issue target = target.replace('-', '_') if write_header: header += '{target}_match_details,{target},'.format(target=target) for name, identity in sample[self.analysistype].results.items(): # Ensure that all dashes are replaced with underscores name = name.replace('-', '_') # If the current target matches the target in the header, add the data to the string if name == target: write_results = True gene_results = '{percent_id}% ({avgdepth} +/- {stddev}),{record},'\ .format(percent_id=identity, avgdepth=sample[self.analysistype].avgdepth[name], stddev=sample[self.analysistype].standarddev[name], record=sample[self.analysistype].sequences[target]) # Populate the data string appropriately data += gene_results # If the target is not present, write dashes to represent the results and sequence if not write_results: data += '-,-,' data += ' \n' write_header = False header += '\n' # Write the strings to the report report.write(header) report.write(data)
[ "def", "report", "(", "self", ")", ":", "# Add all the genes to the header", "header", "=", "'Sample,'", "data", "=", "str", "(", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "reportpath", ",", "'{at}.csv'", ".", "format", "(", "at", "=", "self", ".", "analysistype", ")", ")", ",", "'w'", ")", "as", "report", ":", "write_header", "=", "True", "for", "sample", "in", "self", ".", "runmetadata", ":", "data", "+=", "sample", ".", "name", "+", "','", "# Iterate through all the user-supplied target names", "for", "target", "in", "sorted", "(", "self", ".", "genes", ")", ":", "write_results", "=", "False", "# There was an issue with 'target' not matching 'name' due to a dash being replaced by an underscore", "# only in 'name'. This will hopefully address this issue", "target", "=", "target", ".", "replace", "(", "'-'", ",", "'_'", ")", "if", "write_header", ":", "header", "+=", "'{target}_match_details,{target},'", ".", "format", "(", "target", "=", "target", ")", "for", "name", ",", "identity", "in", "sample", "[", "self", ".", "analysistype", "]", ".", "results", ".", "items", "(", ")", ":", "# Ensure that all dashes are replaced with underscores", "name", "=", "name", ".", "replace", "(", "'-'", ",", "'_'", ")", "# If the current target matches the target in the header, add the data to the string", "if", "name", "==", "target", ":", "write_results", "=", "True", "gene_results", "=", "'{percent_id}% ({avgdepth} +/- {stddev}),{record},'", ".", "format", "(", "percent_id", "=", "identity", ",", "avgdepth", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "avgdepth", "[", "name", "]", ",", "stddev", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "standarddev", "[", "name", "]", ",", "record", "=", "sample", "[", "self", ".", "analysistype", "]", ".", "sequences", "[", "target", "]", ")", "# Populate the data string appropriately", "data", "+=", "gene_results", "# If the target is not present, write dashes to represent 
the results and sequence", "if", "not", "write_results", ":", "data", "+=", "'-,-,'", "data", "+=", "' \\n'", "write_header", "=", "False", "header", "+=", "'\\n'", "# Write the strings to the report", "report", ".", "write", "(", "header", ")", "report", ".", "write", "(", "data", ")" ]
Create the report for the user-supplied targets
[ "Create", "the", "report", "for", "the", "user", "-", "supplied", "targets" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/customsippr/customsippr.py#L41-L81
benoitkugler/abstractDataLibrary
pyDLib/GUI/fields.py
Tels.on_add
def on_add(self, item): """Convert to pseuso acces""" super(Tels, self).on_add(list_views.PseudoAccesCategorie(item))
python
def on_add(self, item): """Convert to pseuso acces""" super(Tels, self).on_add(list_views.PseudoAccesCategorie(item))
[ "def", "on_add", "(", "self", ",", "item", ")", ":", "super", "(", "Tels", ",", "self", ")", ".", "on_add", "(", "list_views", ".", "PseudoAccesCategorie", "(", "item", ")", ")" ]
Convert to pseuso acces
[ "Convert", "to", "pseuso", "acces" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/fields.py#L70-L72
benoitkugler/abstractDataLibrary
pyDLib/GUI/fields.py
Duree.set_data
def set_data(self, *args): """we cant to call set_data to manually update""" db = self.begining.get_data() or formats.DATE_DEFAULT df = self.end.get_data() or formats.DATE_DEFAULT jours = max((df - db).days + 1, 0) self.setText(str(jours) + (jours >= 2 and " jours" or " jour"))
python
def set_data(self, *args): """we cant to call set_data to manually update""" db = self.begining.get_data() or formats.DATE_DEFAULT df = self.end.get_data() or formats.DATE_DEFAULT jours = max((df - db).days + 1, 0) self.setText(str(jours) + (jours >= 2 and " jours" or " jour"))
[ "def", "set_data", "(", "self", ",", "*", "args", ")", ":", "db", "=", "self", ".", "begining", ".", "get_data", "(", ")", "or", "formats", ".", "DATE_DEFAULT", "df", "=", "self", ".", "end", ".", "get_data", "(", ")", "or", "formats", ".", "DATE_DEFAULT", "jours", "=", "max", "(", "(", "df", "-", "db", ")", ".", "days", "+", "1", ",", "0", ")", "self", ".", "setText", "(", "str", "(", "jours", ")", "+", "(", "jours", ">=", "2", "and", "\" jours\"", "or", "\" jour\"", ")", ")" ]
we cant to call set_data to manually update
[ "we", "cant", "to", "call", "set_data", "to", "manually", "update" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/GUI/fields.py#L95-L100
OLC-Bioinformatics/sipprverse
genesippr/genesippr.py
GeneSippr.runner
def runner(self): """ Run the necessary methods in the correct order """ logging.info('Starting {} analysis pipeline'.format(self.analysistype)) if not self.pipeline: general = None for sample in self.runmetadata.samples: general = getattr(sample, 'general') if general is None: # Create the objects to be used in the analyses objects = Objectprep(self) objects.objectprep() self.runmetadata = objects.samples # Run the analyses Sippr(self, self.cutoff) # Create the reports reports = Reports(self) Reports.reporter(reports, analysistype=self.analysistype) # Print the metadata MetadataPrinter(self)
python
def runner(self): """ Run the necessary methods in the correct order """ logging.info('Starting {} analysis pipeline'.format(self.analysistype)) if not self.pipeline: general = None for sample in self.runmetadata.samples: general = getattr(sample, 'general') if general is None: # Create the objects to be used in the analyses objects = Objectprep(self) objects.objectprep() self.runmetadata = objects.samples # Run the analyses Sippr(self, self.cutoff) # Create the reports reports = Reports(self) Reports.reporter(reports, analysistype=self.analysistype) # Print the metadata MetadataPrinter(self)
[ "def", "runner", "(", "self", ")", ":", "logging", ".", "info", "(", "'Starting {} analysis pipeline'", ".", "format", "(", "self", ".", "analysistype", ")", ")", "if", "not", "self", ".", "pipeline", ":", "general", "=", "None", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "general", "=", "getattr", "(", "sample", ",", "'general'", ")", "if", "general", "is", "None", ":", "# Create the objects to be used in the analyses", "objects", "=", "Objectprep", "(", "self", ")", "objects", ".", "objectprep", "(", ")", "self", ".", "runmetadata", "=", "objects", ".", "samples", "# Run the analyses", "Sippr", "(", "self", ",", "self", ".", "cutoff", ")", "# Create the reports", "reports", "=", "Reports", "(", "self", ")", "Reports", ".", "reporter", "(", "reports", ",", "analysistype", "=", "self", ".", "analysistype", ")", "# Print the metadata", "MetadataPrinter", "(", "self", ")" ]
Run the necessary methods in the correct order
[ "Run", "the", "necessary", "methods", "in", "the", "correct", "order" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/genesippr/genesippr.py#L19-L39
tsnaomi/finnsyll
finnsyll/prev/v01.py
same_syllabic_feature
def same_syllabic_feature(ch1, ch2): '''Return True if ch1 and ch2 are both vowels or both consonants.''' if ch1 == '.' or ch2 == '.': return False ch1 = 'V' if ch1 in VOWELS else 'C' if ch1 in CONSONANTS else None ch2 = 'V' if ch2 in VOWELS else 'C' if ch2 in CONSONANTS else None return ch1 == ch2
python
def same_syllabic_feature(ch1, ch2): '''Return True if ch1 and ch2 are both vowels or both consonants.''' if ch1 == '.' or ch2 == '.': return False ch1 = 'V' if ch1 in VOWELS else 'C' if ch1 in CONSONANTS else None ch2 = 'V' if ch2 in VOWELS else 'C' if ch2 in CONSONANTS else None return ch1 == ch2
[ "def", "same_syllabic_feature", "(", "ch1", ",", "ch2", ")", ":", "if", "ch1", "==", "'.'", "or", "ch2", "==", "'.'", ":", "return", "False", "ch1", "=", "'V'", "if", "ch1", "in", "VOWELS", "else", "'C'", "if", "ch1", "in", "CONSONANTS", "else", "None", "ch2", "=", "'V'", "if", "ch2", "in", "VOWELS", "else", "'C'", "if", "ch2", "in", "CONSONANTS", "else", "None", "return", "ch1", "==", "ch2" ]
Return True if ch1 and ch2 are both vowels or both consonants.
[ "Return", "True", "if", "ch1", "and", "ch2", "are", "both", "vowels", "or", "both", "consonants", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v01.py#L14-L22
tsnaomi/finnsyll
finnsyll/prev/v01.py
syllabify
def syllabify(word): '''Syllabify the given word.''' word = replace_umlauts(word) word = apply_T1(word) word = apply_T2(word) word = apply_T4(word) word = apply_T5(word) word = apply_T6(word) word = apply_T7(word) word = replace_umlauts(word, put_back=True)[1:] # FENCEPOST return word
python
def syllabify(word): '''Syllabify the given word.''' word = replace_umlauts(word) word = apply_T1(word) word = apply_T2(word) word = apply_T4(word) word = apply_T5(word) word = apply_T6(word) word = apply_T7(word) word = replace_umlauts(word, put_back=True)[1:] # FENCEPOST return word
[ "def", "syllabify", "(", "word", ")", ":", "word", "=", "replace_umlauts", "(", "word", ")", "word", "=", "apply_T1", "(", "word", ")", "word", "=", "apply_T2", "(", "word", ")", "word", "=", "apply_T4", "(", "word", ")", "word", "=", "apply_T5", "(", "word", ")", "word", "=", "apply_T6", "(", "word", ")", "word", "=", "apply_T7", "(", "word", ")", "word", "=", "replace_umlauts", "(", "word", ",", "put_back", "=", "True", ")", "[", "1", ":", "]", "# FENCEPOST", "return", "word" ]
Syllabify the given word.
[ "Syllabify", "the", "given", "word", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v01.py#L56-L70
tsnaomi/finnsyll
finnsyll/prev/v01.py
apply_T1
def apply_T1(word): '''There is a syllable boundary in front of every CV-sequence.''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if k == 1 and is_consonantal_onset(v): WORD[k] = '.' + v elif is_consonant(v[0]) and WORD.get(k + 1, 0): WORD[k] = v[:-1] + '.' + v[-1] word = _compile_dict_into_word(WORD) return word
python
def apply_T1(word): '''There is a syllable boundary in front of every CV-sequence.''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if k == 1 and is_consonantal_onset(v): WORD[k] = '.' + v elif is_consonant(v[0]) and WORD.get(k + 1, 0): WORD[k] = v[:-1] + '.' + v[-1] word = _compile_dict_into_word(WORD) return word
[ "def", "apply_T1", "(", "word", ")", ":", "WORD", "=", "_split_consonants_and_vowels", "(", "word", ")", "for", "k", ",", "v", "in", "WORD", ".", "iteritems", "(", ")", ":", "if", "k", "==", "1", "and", "is_consonantal_onset", "(", "v", ")", ":", "WORD", "[", "k", "]", "=", "'.'", "+", "v", "elif", "is_consonant", "(", "v", "[", "0", "]", ")", "and", "WORD", ".", "get", "(", "k", "+", "1", ",", "0", ")", ":", "WORD", "[", "k", "]", "=", "v", "[", ":", "-", "1", "]", "+", "'.'", "+", "v", "[", "-", "1", "]", "word", "=", "_compile_dict_into_word", "(", "WORD", ")", "return", "word" ]
There is a syllable boundary in front of every CV-sequence.
[ "There", "is", "a", "syllable", "boundary", "in", "front", "of", "every", "CV", "-", "sequence", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v01.py#L75-L89
tsnaomi/finnsyll
finnsyll/prev/v01.py
apply_T2
def apply_T2(word): '''There is a syllable boundary within a sequence VV of two nonidentical that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if is_diphthong(v): continue if len(v) == 2 and is_vowel(v[0]): if v[0] != v[1]: WORD[k] = v[0] + '.' + v[1] word = _compile_dict_into_word(WORD) return word
python
def apply_T2(word): '''There is a syllable boundary within a sequence VV of two nonidentical that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if is_diphthong(v): continue if len(v) == 2 and is_vowel(v[0]): if v[0] != v[1]: WORD[k] = v[0] + '.' + v[1] word = _compile_dict_into_word(WORD) return word
[ "def", "apply_T2", "(", "word", ")", ":", "WORD", "=", "_split_consonants_and_vowels", "(", "word", ")", "for", "k", ",", "v", "in", "WORD", ".", "iteritems", "(", ")", ":", "if", "is_diphthong", "(", "v", ")", ":", "continue", "if", "len", "(", "v", ")", "==", "2", "and", "is_vowel", "(", "v", "[", "0", "]", ")", ":", "if", "v", "[", "0", "]", "!=", "v", "[", "1", "]", ":", "WORD", "[", "k", "]", "=", "v", "[", "0", "]", "+", "'.'", "+", "v", "[", "1", "]", "word", "=", "_compile_dict_into_word", "(", "WORD", ")", "return", "word" ]
There is a syllable boundary within a sequence VV of two nonidentical that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
[ "There", "is", "a", "syllable", "boundary", "within", "a", "sequence", "VV", "of", "two", "nonidentical", "that", "are", "not", "a", "genuine", "diphthong", "e", ".", "g", ".", "[", "ta", ".", "e", "]", "[", "ko", ".", "et", ".", "taa", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v01.py#L94-L111
tsnaomi/finnsyll
finnsyll/prev/v01.py
apply_T4
def apply_T4(word): # OPTIMIZE '''An agglutination diphthong that ends in /u, y/ usually contains a syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us], [va.ka.ut.taa].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if len(v) == 2 and v.endswith(('u', 'y')): if WORD.get(k + 2, 0): if not WORD.get(k + 3, 0): if len(WORD[k + 2]) == 1 and is_consonant(WORD[k + 2]): WORD[k] = v[0] + '.' + v[1] elif len(WORD[k + 1]) == 1 and WORD.get(k + 3, 0): if is_consonant(WORD[k + 3][0]): WORD[k] = v[0] + '.' + v[1] elif len(WORD[k + 2]) == 2: WORD[k] = v[0] + '.' + v[1] word = _compile_dict_into_word(WORD) return word
python
def apply_T4(word): # OPTIMIZE '''An agglutination diphthong that ends in /u, y/ usually contains a syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us], [va.ka.ut.taa].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if len(v) == 2 and v.endswith(('u', 'y')): if WORD.get(k + 2, 0): if not WORD.get(k + 3, 0): if len(WORD[k + 2]) == 1 and is_consonant(WORD[k + 2]): WORD[k] = v[0] + '.' + v[1] elif len(WORD[k + 1]) == 1 and WORD.get(k + 3, 0): if is_consonant(WORD[k + 3][0]): WORD[k] = v[0] + '.' + v[1] elif len(WORD[k + 2]) == 2: WORD[k] = v[0] + '.' + v[1] word = _compile_dict_into_word(WORD) return word
[ "def", "apply_T4", "(", "word", ")", ":", "# OPTIMIZE", "WORD", "=", "_split_consonants_and_vowels", "(", "word", ")", "for", "k", ",", "v", "in", "WORD", ".", "iteritems", "(", ")", ":", "if", "len", "(", "v", ")", "==", "2", "and", "v", ".", "endswith", "(", "(", "'u'", ",", "'y'", ")", ")", ":", "if", "WORD", ".", "get", "(", "k", "+", "2", ",", "0", ")", ":", "if", "not", "WORD", ".", "get", "(", "k", "+", "3", ",", "0", ")", ":", "if", "len", "(", "WORD", "[", "k", "+", "2", "]", ")", "==", "1", "and", "is_consonant", "(", "WORD", "[", "k", "+", "2", "]", ")", ":", "WORD", "[", "k", "]", "=", "v", "[", "0", "]", "+", "'.'", "+", "v", "[", "1", "]", "elif", "len", "(", "WORD", "[", "k", "+", "1", "]", ")", "==", "1", "and", "WORD", ".", "get", "(", "k", "+", "3", ",", "0", ")", ":", "if", "is_consonant", "(", "WORD", "[", "k", "+", "3", "]", "[", "0", "]", ")", ":", "WORD", "[", "k", "]", "=", "v", "[", "0", "]", "+", "'.'", "+", "v", "[", "1", "]", "elif", "len", "(", "WORD", "[", "k", "+", "2", "]", ")", "==", "2", ":", "WORD", "[", "k", "]", "=", "v", "[", "0", "]", "+", "'.'", "+", "v", "[", "1", "]", "word", "=", "_compile_dict_into_word", "(", "WORD", ")", "return", "word" ]
An agglutination diphthong that ends in /u, y/ usually contains a syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us], [va.ka.ut.taa].
[ "An", "agglutination", "diphthong", "that", "ends", "in", "/", "u", "y", "/", "usually", "contains", "a", "syllable", "boundary", "when", "-", "C#", "or", "-", "CCV", "follow", "e", ".", "g", ".", "[", "lau", ".", "ka", ".", "us", "]", "[", "va", ".", "ka", ".", "ut", ".", "taa", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v01.py#L116-L141
tsnaomi/finnsyll
finnsyll/prev/v01.py
apply_T5
def apply_T5(word): '''If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final diphthong, there is a syllable boundary between it and the third vowel, e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an], [säi.e], [oi.om.me].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if len(v) >= 3 and is_vowel(v[0]): vv = [v.find(i) for i in i_DIPHTHONGS if v.find(i) > 0] if any(vv): vv = vv[0] if vv == v[0]: WORD[k] = v[:2] + '.' + v[2:] else: WORD[k] = v[:vv] + '.' + v[vv:] word = _compile_dict_into_word(WORD) return word
python
def apply_T5(word): '''If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final diphthong, there is a syllable boundary between it and the third vowel, e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an], [säi.e], [oi.om.me].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if len(v) >= 3 and is_vowel(v[0]): vv = [v.find(i) for i in i_DIPHTHONGS if v.find(i) > 0] if any(vv): vv = vv[0] if vv == v[0]: WORD[k] = v[:2] + '.' + v[2:] else: WORD[k] = v[:vv] + '.' + v[vv:] word = _compile_dict_into_word(WORD) return word
[ "def", "apply_T5", "(", "word", ")", ":", "WORD", "=", "_split_consonants_and_vowels", "(", "word", ")", "for", "k", ",", "v", "in", "WORD", ".", "iteritems", "(", ")", ":", "if", "len", "(", "v", ")", ">=", "3", "and", "is_vowel", "(", "v", "[", "0", "]", ")", ":", "vv", "=", "[", "v", ".", "find", "(", "i", ")", "for", "i", "in", "i_DIPHTHONGS", "if", "v", ".", "find", "(", "i", ")", ">", "0", "]", "if", "any", "(", "vv", ")", ":", "vv", "=", "vv", "[", "0", "]", "if", "vv", "==", "v", "[", "0", "]", ":", "WORD", "[", "k", "]", "=", "v", "[", ":", "2", "]", "+", "'.'", "+", "v", "[", "2", ":", "]", "else", ":", "WORD", "[", "k", "]", "=", "v", "[", ":", "vv", "]", "+", "'.'", "+", "v", "[", "vv", ":", "]", "word", "=", "_compile_dict_into_word", "(", "WORD", ")", "return", "word" ]
If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final diphthong, there is a syllable boundary between it and the third vowel, e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an], [säi.e], [oi.om.me].
[ "If", "a", "(", "V", ")", "VVV", "-", "sequence", "contains", "a", "VV", "-", "sequence", "that", "could", "be", "an", "/", "i", "/", "-", "final", "diphthong", "there", "is", "a", "syllable", "boundary", "between", "it", "and", "the", "third", "vowel", "e", ".", "g", ".", "[", "raa", ".", "ois", ".", "sa", "]", "[", "huo", ".", "uim", ".", "me", "]", "[", "la", ".", "eis", ".", "sa", "]", "[", "sel", ".", "vi", ".", "äi", ".", "si", "]", "[", "tai", ".", "an", "]", "[", "säi", ".", "e", "]", "[", "oi", ".", "om", ".", "me", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v01.py#L149-L172
tsnaomi/finnsyll
finnsyll/prev/v01.py
apply_T6
def apply_T6(word): '''If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if len(v) == 3 and is_vowel(v[0]): vv = [v.find(i) for i in LONG_VOWELS if v.find(i) > 0] if any(vv): vv = vv[0] if vv == v[0]: WORD[k] = v[:2] + '.' + v[2:] else: WORD[k] = v[:vv] + '.' + v[vv:] word = _compile_dict_into_word(WORD) return word
python
def apply_T6(word): '''If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if len(v) == 3 and is_vowel(v[0]): vv = [v.find(i) for i in LONG_VOWELS if v.find(i) > 0] if any(vv): vv = vv[0] if vv == v[0]: WORD[k] = v[:2] + '.' + v[2:] else: WORD[k] = v[:vv] + '.' + v[vv:] word = _compile_dict_into_word(WORD) return word
[ "def", "apply_T6", "(", "word", ")", ":", "WORD", "=", "_split_consonants_and_vowels", "(", "word", ")", "for", "k", ",", "v", "in", "WORD", ".", "iteritems", "(", ")", ":", "if", "len", "(", "v", ")", "==", "3", "and", "is_vowel", "(", "v", "[", "0", "]", ")", ":", "vv", "=", "[", "v", ".", "find", "(", "i", ")", "for", "i", "in", "LONG_VOWELS", "if", "v", ".", "find", "(", "i", ")", ">", "0", "]", "if", "any", "(", "vv", ")", ":", "vv", "=", "vv", "[", "0", "]", "if", "vv", "==", "v", "[", "0", "]", ":", "WORD", "[", "k", "]", "=", "v", "[", ":", "2", "]", "+", "'.'", "+", "v", "[", "2", ":", "]", "else", ":", "WORD", "[", "k", "]", "=", "v", "[", ":", "vv", "]", "+", "'.'", "+", "v", "[", "vv", ":", "]", "word", "=", "_compile_dict_into_word", "(", "WORD", ")", "return", "word" ]
If a VVV-sequence contains a long vowel, there is a syllable boundary between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].
[ "If", "a", "VVV", "-", "sequence", "contains", "a", "long", "vowel", "there", "is", "a", "syllable", "boundary", "between", "it", "and", "the", "third", "vowel", "e", ".", "g", ".", "[", "kor", ".", "ke", ".", "aa", "]", "[", "yh", ".", "ti", ".", "öön", "]", "[", "ruu", ".", "an", "]", "[", "mää", ".", "yt", ".", "te", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v01.py#L180-L202
tsnaomi/finnsyll
finnsyll/prev/v01.py
apply_T7
def apply_T7(word): '''If a VVV-sequence does not contain a potential /i/-final diphthong, there is a syllable boundary between the second and third vowels, e.g. [kau.an], [leu.an], [kiu.as].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if len(v) == 3 and is_vowel(v[0]): WORD[k] = v[:2] + '.' + v[2:] word = _compile_dict_into_word(WORD) return word
python
def apply_T7(word): '''If a VVV-sequence does not contain a potential /i/-final diphthong, there is a syllable boundary between the second and third vowels, e.g. [kau.an], [leu.an], [kiu.as].''' WORD = _split_consonants_and_vowels(word) for k, v in WORD.iteritems(): if len(v) == 3 and is_vowel(v[0]): WORD[k] = v[:2] + '.' + v[2:] word = _compile_dict_into_word(WORD) return word
[ "def", "apply_T7", "(", "word", ")", ":", "WORD", "=", "_split_consonants_and_vowels", "(", "word", ")", "for", "k", ",", "v", "in", "WORD", ".", "iteritems", "(", ")", ":", "if", "len", "(", "v", ")", "==", "3", "and", "is_vowel", "(", "v", "[", "0", "]", ")", ":", "WORD", "[", "k", "]", "=", "v", "[", ":", "2", "]", "+", "'.'", "+", "v", "[", "2", ":", "]", "word", "=", "_compile_dict_into_word", "(", "WORD", ")", "return", "word" ]
If a VVV-sequence does not contain a potential /i/-final diphthong, there is a syllable boundary between the second and third vowels, e.g. [kau.an], [leu.an], [kiu.as].
[ "If", "a", "VVV", "-", "sequence", "does", "not", "contain", "a", "potential", "/", "i", "/", "-", "final", "diphthong", "there", "is", "a", "syllable", "boundary", "between", "the", "second", "and", "third", "vowels", "e", ".", "g", ".", "[", "kau", ".", "an", "]", "[", "leu", ".", "an", "]", "[", "kiu", ".", "as", "]", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v01.py#L207-L220
tsnaomi/finnsyll
finnsyll/prev/v12.py
syllabify
def syllabify(word): '''Syllabify the given word, whether simplex or complex.''' compound = not word.isalpha() syllabify = _syllabify_complex if compound else _syllabify_simplex syllabifications = list(syllabify(word)) # if variation, order variants from most preferred to least preferred if len(syllabifications) > 1: syllabifications = rank(syllabifications) for word, rules in syllabifications: yield _post_process(word, rules)
python
def syllabify(word): '''Syllabify the given word, whether simplex or complex.''' compound = not word.isalpha() syllabify = _syllabify_complex if compound else _syllabify_simplex syllabifications = list(syllabify(word)) # if variation, order variants from most preferred to least preferred if len(syllabifications) > 1: syllabifications = rank(syllabifications) for word, rules in syllabifications: yield _post_process(word, rules)
[ "def", "syllabify", "(", "word", ")", ":", "compound", "=", "not", "word", ".", "isalpha", "(", ")", "syllabify", "=", "_syllabify_complex", "if", "compound", "else", "_syllabify_simplex", "syllabifications", "=", "list", "(", "syllabify", "(", "word", ")", ")", "# if variation, order variants from most preferred to least preferred", "if", "len", "(", "syllabifications", ")", ">", "1", ":", "syllabifications", "=", "rank", "(", "syllabifications", ")", "for", "word", ",", "rules", "in", "syllabifications", ":", "yield", "_post_process", "(", "word", ",", "rules", ")" ]
Syllabify the given word, whether simplex or complex.
[ "Syllabify", "the", "given", "word", "whether", "simplex", "or", "complex", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v12.py#L17-L28
tsnaomi/finnsyll
finnsyll/prev/v12.py
wsp
def wsp(word): '''Return the number of unstressed superheavy syllables.''' violations = 0 unstressed = [] for w in extract_words(word): unstressed += w.split('.')[1::2] # even syllables # include extrametrical odd syllables as potential WSP violations if w.count('.') % 2 == 0: unstressed += [w.rsplit('.', 1)[-1], ] # SHSP for syll in unstressed: if re.search(r'[ieaouäöy]{2}[^$ieaouäöy]+', syll, flags=FLAGS): violations += 1 # # WSP (CVV = heavy) # for syll in unstressed: # if re.search( # ur'[ieaouäöy]{2}|[ieaouäöy]+[^ieaouäöy]+', # syll, flags=re.I | re.U): # violations += 1 return violations
python
def wsp(word): '''Return the number of unstressed superheavy syllables.''' violations = 0 unstressed = [] for w in extract_words(word): unstressed += w.split('.')[1::2] # even syllables # include extrametrical odd syllables as potential WSP violations if w.count('.') % 2 == 0: unstressed += [w.rsplit('.', 1)[-1], ] # SHSP for syll in unstressed: if re.search(r'[ieaouäöy]{2}[^$ieaouäöy]+', syll, flags=FLAGS): violations += 1 # # WSP (CVV = heavy) # for syll in unstressed: # if re.search( # ur'[ieaouäöy]{2}|[ieaouäöy]+[^ieaouäöy]+', # syll, flags=re.I | re.U): # violations += 1 return violations
[ "def", "wsp", "(", "word", ")", ":", "violations", "=", "0", "unstressed", "=", "[", "]", "for", "w", "in", "extract_words", "(", "word", ")", ":", "unstressed", "+=", "w", ".", "split", "(", "'.'", ")", "[", "1", ":", ":", "2", "]", "# even syllables", "# include extrametrical odd syllables as potential WSP violations", "if", "w", ".", "count", "(", "'.'", ")", "%", "2", "==", "0", ":", "unstressed", "+=", "[", "w", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "-", "1", "]", ",", "]", "# SHSP", "for", "syll", "in", "unstressed", ":", "if", "re", ".", "search", "(", "r'[ieaouäöy]{2}[^$ieaouäöy]+', sy", "l", ", fl", "a", "s=FLA", "G", "S):", "", "", "violations", "+=", "1", "# # WSP (CVV = heavy)", "# for syll in unstressed:", "# if re.search(", "# ur'[ieaouäöy]{2}|[ieaouäöy]+[^ieaouäöy]+',", "# syll, flags=re.I | re.U):", "# violations += 1", "return", "violations" ]
Return the number of unstressed superheavy syllables.
[ "Return", "the", "number", "of", "unstressed", "superheavy", "syllables", "." ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/v12.py#L359-L383
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractAcces.modifie
def modifie(self, key: str, value: Any) -> None: """Store the modification. `value` should be dumped in DB compatible format.""" if key in self.FIELDS_OPTIONS: self.modifie_options(key, value) else: self.modifications[key] = value
python
def modifie(self, key: str, value: Any) -> None: """Store the modification. `value` should be dumped in DB compatible format.""" if key in self.FIELDS_OPTIONS: self.modifie_options(key, value) else: self.modifications[key] = value
[ "def", "modifie", "(", "self", ",", "key", ":", "str", ",", "value", ":", "Any", ")", "->", "None", ":", "if", "key", "in", "self", ".", "FIELDS_OPTIONS", ":", "self", ".", "modifie_options", "(", "key", ",", "value", ")", "else", ":", "self", ".", "modifications", "[", "key", "]", "=", "value" ]
Store the modification. `value` should be dumped in DB compatible format.
[ "Store", "the", "modification", ".", "value", "should", "be", "dumped", "in", "DB", "compatible", "format", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L57-L62
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractAcces.modifie_many
def modifie_many(self, dic: dict): """Convenience function which calls modifie on each element of dic""" for i, v in dic.items(): self.modifie(i, v)
python
def modifie_many(self, dic: dict): """Convenience function which calls modifie on each element of dic""" for i, v in dic.items(): self.modifie(i, v)
[ "def", "modifie_many", "(", "self", ",", "dic", ":", "dict", ")", ":", "for", "i", ",", "v", "in", "dic", ".", "items", "(", ")", ":", "self", ".", "modifie", "(", "i", ",", "v", ")" ]
Convenience function which calls modifie on each element of dic
[ "Convenience", "function", "which", "calls", "modifie", "on", "each", "element", "of", "dic" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L64-L67
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractAcces.save
def save(self) -> sql.Executant: """Prepare a SQL request to save the current modifications. Returns actually a LIST of requests (which may be of length one). Note than it can include modifications on other part of the data. After succes, the base should be updated. """ r = self._dict_to_SQL(self.modifications) self.modifications.clear() return r
python
def save(self) -> sql.Executant: """Prepare a SQL request to save the current modifications. Returns actually a LIST of requests (which may be of length one). Note than it can include modifications on other part of the data. After succes, the base should be updated. """ r = self._dict_to_SQL(self.modifications) self.modifications.clear() return r
[ "def", "save", "(", "self", ")", "->", "sql", ".", "Executant", ":", "r", "=", "self", ".", "_dict_to_SQL", "(", "self", ".", "modifications", ")", "self", ".", "modifications", ".", "clear", "(", ")", "return", "r" ]
Prepare a SQL request to save the current modifications. Returns actually a LIST of requests (which may be of length one). Note than it can include modifications on other part of the data. After succes, the base should be updated.
[ "Prepare", "a", "SQL", "request", "to", "save", "the", "current", "modifications", ".", "Returns", "actually", "a", "LIST", "of", "requests", "(", "which", "may", "be", "of", "length", "one", ")", ".", "Note", "than", "it", "can", "include", "modifications", "on", "other", "part", "of", "the", "data", ".", "After", "succes", "the", "base", "should", "be", "updated", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L73-L81
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractAcces.modifie_options
def modifie_options(self, field_option, value): """Set options in modifications. All options will be stored since it should be grouped in the DB.""" options = dict(self["options"] or {}, **{field_option: value}) self.modifications["options"] = options
python
def modifie_options(self, field_option, value): """Set options in modifications. All options will be stored since it should be grouped in the DB.""" options = dict(self["options"] or {}, **{field_option: value}) self.modifications["options"] = options
[ "def", "modifie_options", "(", "self", ",", "field_option", ",", "value", ")", ":", "options", "=", "dict", "(", "self", "[", "\"options\"", "]", "or", "{", "}", ",", "*", "*", "{", "field_option", ":", "value", "}", ")", "self", ".", "modifications", "[", "\"options\"", "]", "=", "options" ]
Set options in modifications. All options will be stored since it should be grouped in the DB.
[ "Set", "options", "in", "modifications", ".", "All", "options", "will", "be", "stored", "since", "it", "should", "be", "grouped", "in", "the", "DB", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L86-L90
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractDictTable._from_dict_dict
def _from_dict_dict(cls, dic): """Takes a dict {id : dict_attributes} """ return cls({_convert_id(i): v for i, v in dic.items()})
python
def _from_dict_dict(cls, dic): """Takes a dict {id : dict_attributes} """ return cls({_convert_id(i): v for i, v in dic.items()})
[ "def", "_from_dict_dict", "(", "cls", ",", "dic", ")", ":", "return", "cls", "(", "{", "_convert_id", "(", "i", ")", ":", "v", "for", "i", ",", "v", "in", "dic", ".", "items", "(", ")", "}", ")" ]
Takes a dict {id : dict_attributes}
[ "Takes", "a", "dict", "{", "id", ":", "dict_attributes", "}" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L114-L116
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractDictTable._from_list_dict
def _from_list_dict(cls, list_dic): """Takes a list of dict like objects and uses `champ_id` field as Id""" return cls({_convert_id(dic[cls.CHAMP_ID]): dict(dic) for dic in list_dic})
python
def _from_list_dict(cls, list_dic): """Takes a list of dict like objects and uses `champ_id` field as Id""" return cls({_convert_id(dic[cls.CHAMP_ID]): dict(dic) for dic in list_dic})
[ "def", "_from_list_dict", "(", "cls", ",", "list_dic", ")", ":", "return", "cls", "(", "{", "_convert_id", "(", "dic", "[", "cls", ".", "CHAMP_ID", "]", ")", ":", "dict", "(", "dic", ")", "for", "dic", "in", "list_dic", "}", ")" ]
Takes a list of dict like objects and uses `champ_id` field as Id
[ "Takes", "a", "list", "of", "dict", "like", "objects", "and", "uses", "champ_id", "field", "as", "Id" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L119-L121
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractDictTable.base_recherche_rapide
def base_recherche_rapide(self, base, pattern, to_string_hook=None): """ Return a collection of access matching `pattern`. `to_string_hook` is an optionnal callable dict -> str to map record to string. Default to _record_to_string """ Ac = self.ACCES if pattern == "*": return groups.Collection(Ac(base, i) for i in self) if len(pattern) >= MIN_CHAR_SEARCH: # Needed chars. sub_patterns = pattern.split(" ") try: regexps = tuple(re.compile(sub_pattern, flags=re.I) for sub_pattern in sub_patterns) except re.error: return groups.Collection() def search(string): for regexp in regexps: if not regexp.search(string): return False return True to_string_hook = to_string_hook or self._record_to_string return groups.Collection(Ac(base, i) for i, p in self.items() if search(to_string_hook(p))) return groups.Collection()
python
def base_recherche_rapide(self, base, pattern, to_string_hook=None): """ Return a collection of access matching `pattern`. `to_string_hook` is an optionnal callable dict -> str to map record to string. Default to _record_to_string """ Ac = self.ACCES if pattern == "*": return groups.Collection(Ac(base, i) for i in self) if len(pattern) >= MIN_CHAR_SEARCH: # Needed chars. sub_patterns = pattern.split(" ") try: regexps = tuple(re.compile(sub_pattern, flags=re.I) for sub_pattern in sub_patterns) except re.error: return groups.Collection() def search(string): for regexp in regexps: if not regexp.search(string): return False return True to_string_hook = to_string_hook or self._record_to_string return groups.Collection(Ac(base, i) for i, p in self.items() if search(to_string_hook(p))) return groups.Collection()
[ "def", "base_recherche_rapide", "(", "self", ",", "base", ",", "pattern", ",", "to_string_hook", "=", "None", ")", ":", "Ac", "=", "self", ".", "ACCES", "if", "pattern", "==", "\"*\"", ":", "return", "groups", ".", "Collection", "(", "Ac", "(", "base", ",", "i", ")", "for", "i", "in", "self", ")", "if", "len", "(", "pattern", ")", ">=", "MIN_CHAR_SEARCH", ":", "# Needed chars.", "sub_patterns", "=", "pattern", ".", "split", "(", "\" \"", ")", "try", ":", "regexps", "=", "tuple", "(", "re", ".", "compile", "(", "sub_pattern", ",", "flags", "=", "re", ".", "I", ")", "for", "sub_pattern", "in", "sub_patterns", ")", "except", "re", ".", "error", ":", "return", "groups", ".", "Collection", "(", ")", "def", "search", "(", "string", ")", ":", "for", "regexp", "in", "regexps", ":", "if", "not", "regexp", ".", "search", "(", "string", ")", ":", "return", "False", "return", "True", "to_string_hook", "=", "to_string_hook", "or", "self", ".", "_record_to_string", "return", "groups", ".", "Collection", "(", "Ac", "(", "base", ",", "i", ")", "for", "i", ",", "p", "in", "self", ".", "items", "(", ")", "if", "search", "(", "to_string_hook", "(", "p", ")", ")", ")", "return", "groups", ".", "Collection", "(", ")" ]
Return a collection of access matching `pattern`. `to_string_hook` is an optionnal callable dict -> str to map record to string. Default to _record_to_string
[ "Return", "a", "collection", "of", "access", "matching", "pattern", ".", "to_string_hook", "is", "an", "optionnal", "callable", "dict", "-", ">", "str", "to", "map", "record", "to", "string", ".", "Default", "to", "_record_to_string" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L141-L167
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractDictTable.select_by_field
def select_by_field(self, base, field, value): """Return collection of acces whose field equal value""" Ac = self.ACCES return groups.Collection(Ac(base, i) for i, row in self.items() if row[field] == value)
python
def select_by_field(self, base, field, value): """Return collection of acces whose field equal value""" Ac = self.ACCES return groups.Collection(Ac(base, i) for i, row in self.items() if row[field] == value)
[ "def", "select_by_field", "(", "self", ",", "base", ",", "field", ",", "value", ")", ":", "Ac", "=", "self", ".", "ACCES", "return", "groups", ".", "Collection", "(", "Ac", "(", "base", ",", "i", ")", "for", "i", ",", "row", "in", "self", ".", "items", "(", ")", "if", "row", "[", "field", "]", "==", "value", ")" ]
Return collection of acces whose field equal value
[ "Return", "collection", "of", "acces", "whose", "field", "equal", "value" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L169-L172
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractDictTable.select_by_critere
def select_by_critere(self, base, criteria): """ :param base: Reference on whole base :param criteria: Callable abstractAcces -> Bool, acting as filter :return: Collection on acces passing the criteria """ Ac = self.ACCES return groups.Collection(Ac(base, i) for i in self if criteria(Ac(base, i)))
python
def select_by_critere(self, base, criteria): """ :param base: Reference on whole base :param criteria: Callable abstractAcces -> Bool, acting as filter :return: Collection on acces passing the criteria """ Ac = self.ACCES return groups.Collection(Ac(base, i) for i in self if criteria(Ac(base, i)))
[ "def", "select_by_critere", "(", "self", ",", "base", ",", "criteria", ")", ":", "Ac", "=", "self", ".", "ACCES", "return", "groups", ".", "Collection", "(", "Ac", "(", "base", ",", "i", ")", "for", "i", "in", "self", "if", "criteria", "(", "Ac", "(", "base", ",", "i", ")", ")", ")" ]
:param base: Reference on whole base :param criteria: Callable abstractAcces -> Bool, acting as filter :return: Collection on acces passing the criteria
[ ":", "param", "base", ":", "Reference", "on", "whole", "base", ":", "param", "criteria", ":", "Callable", "abstractAcces", "-", ">", "Bool", "acting", "as", "filter", ":", "return", ":", "Collection", "on", "acces", "passing", "the", "criteria" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L174-L181
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractBase.load_from_db
def load_from_db(cls, callback_etat=print, out=None): """Launch data fetching then load data received. The method _load_remote_db should be overridden. If out is given, datas are set in it, instead of returning a new base object. """ dic = cls._load_remote_db(callback_etat) callback_etat("Chargement...", 2, 3) if out is None: return cls(dic) cls.__init__(out, datas=dic)
python
def load_from_db(cls, callback_etat=print, out=None): """Launch data fetching then load data received. The method _load_remote_db should be overridden. If out is given, datas are set in it, instead of returning a new base object. """ dic = cls._load_remote_db(callback_etat) callback_etat("Chargement...", 2, 3) if out is None: return cls(dic) cls.__init__(out, datas=dic)
[ "def", "load_from_db", "(", "cls", ",", "callback_etat", "=", "print", ",", "out", "=", "None", ")", ":", "dic", "=", "cls", ".", "_load_remote_db", "(", "callback_etat", ")", "callback_etat", "(", "\"Chargement...\"", ",", "2", ",", "3", ")", "if", "out", "is", "None", ":", "return", "cls", "(", "dic", ")", "cls", ".", "__init__", "(", "out", ",", "datas", "=", "dic", ")" ]
Launch data fetching then load data received. The method _load_remote_db should be overridden. If out is given, datas are set in it, instead of returning a new base object.
[ "Launch", "data", "fetching", "then", "load", "data", "received", ".", "The", "method", "_load_remote_db", "should", "be", "overridden", ".", "If", "out", "is", "given", "datas", "are", "set", "in", "it", "instead", "of", "returning", "a", "new", "base", "object", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L214-L223
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractBase._parse_text_DB
def _parse_text_DB(self, s): """Returns a dict of table interpreted from s. s should be Json string encoding a dict { table_name : [fields_name,...] , [rows,... ] }""" dic = self.decode_json_str(s) new_dic = {} for table_name, (header, rows) in dic.items(): newl = [{c: ligne[i] for i, c in enumerate(header)} for ligne in rows] new_dic[table_name] = newl return new_dic
python
def _parse_text_DB(self, s): """Returns a dict of table interpreted from s. s should be Json string encoding a dict { table_name : [fields_name,...] , [rows,... ] }""" dic = self.decode_json_str(s) new_dic = {} for table_name, (header, rows) in dic.items(): newl = [{c: ligne[i] for i, c in enumerate(header)} for ligne in rows] new_dic[table_name] = newl return new_dic
[ "def", "_parse_text_DB", "(", "self", ",", "s", ")", ":", "dic", "=", "self", ".", "decode_json_str", "(", "s", ")", "new_dic", "=", "{", "}", "for", "table_name", ",", "(", "header", ",", "rows", ")", "in", "dic", ".", "items", "(", ")", ":", "newl", "=", "[", "{", "c", ":", "ligne", "[", "i", "]", "for", "i", ",", "c", "in", "enumerate", "(", "header", ")", "}", "for", "ligne", "in", "rows", "]", "new_dic", "[", "table_name", "]", "=", "newl", "return", "new_dic" ]
Returns a dict of table interpreted from s. s should be Json string encoding a dict { table_name : [fields_name,...] , [rows,... ] }
[ "Returns", "a", "dict", "of", "table", "interpreted", "from", "s", ".", "s", "should", "be", "Json", "string", "encoding", "a", "dict", "{", "table_name", ":", "[", "fields_name", "...", "]", "[", "rows", "...", "]", "}" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L235-L244
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractBase.load_from_local
def load_from_local(cls): """Load datas from local file.""" try: with open(cls.LOCAL_DB_PATH, 'rb') as f: b = f.read() s = security.protege_data(b, False) except (FileNotFoundError, KeyError): logging.exception(cls.__name__) raise StructureError( "Erreur dans le chargement de la sauvegarde locale !") else: return cls(cls.decode_json_str(s))
python
def load_from_local(cls): """Load datas from local file.""" try: with open(cls.LOCAL_DB_PATH, 'rb') as f: b = f.read() s = security.protege_data(b, False) except (FileNotFoundError, KeyError): logging.exception(cls.__name__) raise StructureError( "Erreur dans le chargement de la sauvegarde locale !") else: return cls(cls.decode_json_str(s))
[ "def", "load_from_local", "(", "cls", ")", ":", "try", ":", "with", "open", "(", "cls", ".", "LOCAL_DB_PATH", ",", "'rb'", ")", "as", "f", ":", "b", "=", "f", ".", "read", "(", ")", "s", "=", "security", ".", "protege_data", "(", "b", ",", "False", ")", "except", "(", "FileNotFoundError", ",", "KeyError", ")", ":", "logging", ".", "exception", "(", "cls", ".", "__name__", ")", "raise", "StructureError", "(", "\"Erreur dans le chargement de la sauvegarde locale !\"", ")", "else", ":", "return", "cls", "(", "cls", ".", "decode_json_str", "(", "s", ")", ")" ]
Load datas from local file.
[ "Load", "datas", "from", "local", "file", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L255-L266
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractBase.dumps
def dumps(self): """Return a dictionnary of current tables""" return {table_name: getattr(self, table_name).dumps() for table_name in self.TABLES}
python
def dumps(self): """Return a dictionnary of current tables""" return {table_name: getattr(self, table_name).dumps() for table_name in self.TABLES}
[ "def", "dumps", "(", "self", ")", ":", "return", "{", "table_name", ":", "getattr", "(", "self", ",", "table_name", ")", ".", "dumps", "(", ")", "for", "table_name", "in", "self", ".", "TABLES", "}" ]
Return a dictionnary of current tables
[ "Return", "a", "dictionnary", "of", "current", "tables" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L285-L287
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
abstractBase.save_to_local
def save_to_local(self, callback_etat=print): """ Saved current in memory base to local file. It's a backup, not a convenient way to update datas :param callback_etat: state callback, taking str,int,int as args """ callback_etat("Aquisition...", 0, 3) d = self.dumps() s = json.dumps(d, indent=4, cls=formats.JsonEncoder) callback_etat("Chiffrement...", 1, 3) s = security.protege_data(s, True) callback_etat("Enregistrement...", 2, 3) try: with open(self.LOCAL_DB_PATH, 'wb') as f: f.write(s) except (FileNotFoundError): logging.exception(self.__class__.__name__) raise StructureError("Chemin de sauvegarde introuvable !")
python
def save_to_local(self, callback_etat=print): """ Saved current in memory base to local file. It's a backup, not a convenient way to update datas :param callback_etat: state callback, taking str,int,int as args """ callback_etat("Aquisition...", 0, 3) d = self.dumps() s = json.dumps(d, indent=4, cls=formats.JsonEncoder) callback_etat("Chiffrement...", 1, 3) s = security.protege_data(s, True) callback_etat("Enregistrement...", 2, 3) try: with open(self.LOCAL_DB_PATH, 'wb') as f: f.write(s) except (FileNotFoundError): logging.exception(self.__class__.__name__) raise StructureError("Chemin de sauvegarde introuvable !")
[ "def", "save_to_local", "(", "self", ",", "callback_etat", "=", "print", ")", ":", "callback_etat", "(", "\"Aquisition...\"", ",", "0", ",", "3", ")", "d", "=", "self", ".", "dumps", "(", ")", "s", "=", "json", ".", "dumps", "(", "d", ",", "indent", "=", "4", ",", "cls", "=", "formats", ".", "JsonEncoder", ")", "callback_etat", "(", "\"Chiffrement...\"", ",", "1", ",", "3", ")", "s", "=", "security", ".", "protege_data", "(", "s", ",", "True", ")", "callback_etat", "(", "\"Enregistrement...\"", ",", "2", ",", "3", ")", "try", ":", "with", "open", "(", "self", ".", "LOCAL_DB_PATH", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "s", ")", "except", "(", "FileNotFoundError", ")", ":", "logging", ".", "exception", "(", "self", ".", "__class__", ".", "__name__", ")", "raise", "StructureError", "(", "\"Chemin de sauvegarde introuvable !\"", ")" ]
Saved current in memory base to local file. It's a backup, not a convenient way to update datas :param callback_etat: state callback, taking str,int,int as args
[ "Saved", "current", "in", "memory", "base", "to", "local", "file", ".", "It", "s", "a", "backup", "not", "a", "convenient", "way", "to", "update", "datas" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L301-L319
kappius/pyheaderfile
pyheaderfile/excel.py
Xls.read_cell
def read_cell(self, x, y): """ reads the cell at position x and y; puts the default styles in xlwt """ cell = self._sheet.row(x)[y] if self._file.xf_list[ cell.xf_index].background.pattern_colour_index == 64: self._file.xf_list[ cell.xf_index].background.pattern_colour_index = 9 if self._file.xf_list[ cell.xf_index].background.pattern_colour_index in self.colors.keys(): style = self.colors[self._file.xf_list[ cell.xf_index].background.pattern_colour_index] else: style = self.xlwt.easyxf( 'pattern: pattern solid; border: top thin, right thin, bottom thin, left thin;') style.pattern.pattern_fore_colour = self._file.xf_list[ cell.xf_index].background.pattern_colour_index self.colors[self._file.xf_list[ cell.xf_index].background.pattern_colour_index] = style style.font.name = self._file.font_list[ self._file.xf_list[cell.xf_index].font_index].name style.font.bold = self._file.font_list[ self._file.xf_list[cell.xf_index].font_index].bold if isinstance(self.header[y], tuple): header = self.header[y][0] else: header = self.header[y] if self.strip: if is_str_or_unicode(cell.value): cell.value = cell.value.strip() if self.style: return {header: (cell.value, style)} else: return {header: cell.value}
python
def read_cell(self, x, y): """ reads the cell at position x and y; puts the default styles in xlwt """ cell = self._sheet.row(x)[y] if self._file.xf_list[ cell.xf_index].background.pattern_colour_index == 64: self._file.xf_list[ cell.xf_index].background.pattern_colour_index = 9 if self._file.xf_list[ cell.xf_index].background.pattern_colour_index in self.colors.keys(): style = self.colors[self._file.xf_list[ cell.xf_index].background.pattern_colour_index] else: style = self.xlwt.easyxf( 'pattern: pattern solid; border: top thin, right thin, bottom thin, left thin;') style.pattern.pattern_fore_colour = self._file.xf_list[ cell.xf_index].background.pattern_colour_index self.colors[self._file.xf_list[ cell.xf_index].background.pattern_colour_index] = style style.font.name = self._file.font_list[ self._file.xf_list[cell.xf_index].font_index].name style.font.bold = self._file.font_list[ self._file.xf_list[cell.xf_index].font_index].bold if isinstance(self.header[y], tuple): header = self.header[y][0] else: header = self.header[y] if self.strip: if is_str_or_unicode(cell.value): cell.value = cell.value.strip() if self.style: return {header: (cell.value, style)} else: return {header: cell.value}
[ "def", "read_cell", "(", "self", ",", "x", ",", "y", ")", ":", "cell", "=", "self", ".", "_sheet", ".", "row", "(", "x", ")", "[", "y", "]", "if", "self", ".", "_file", ".", "xf_list", "[", "cell", ".", "xf_index", "]", ".", "background", ".", "pattern_colour_index", "==", "64", ":", "self", ".", "_file", ".", "xf_list", "[", "cell", ".", "xf_index", "]", ".", "background", ".", "pattern_colour_index", "=", "9", "if", "self", ".", "_file", ".", "xf_list", "[", "cell", ".", "xf_index", "]", ".", "background", ".", "pattern_colour_index", "in", "self", ".", "colors", ".", "keys", "(", ")", ":", "style", "=", "self", ".", "colors", "[", "self", ".", "_file", ".", "xf_list", "[", "cell", ".", "xf_index", "]", ".", "background", ".", "pattern_colour_index", "]", "else", ":", "style", "=", "self", ".", "xlwt", ".", "easyxf", "(", "'pattern: pattern solid; border: top thin, right thin, bottom thin, left thin;'", ")", "style", ".", "pattern", ".", "pattern_fore_colour", "=", "self", ".", "_file", ".", "xf_list", "[", "cell", ".", "xf_index", "]", ".", "background", ".", "pattern_colour_index", "self", ".", "colors", "[", "self", ".", "_file", ".", "xf_list", "[", "cell", ".", "xf_index", "]", ".", "background", ".", "pattern_colour_index", "]", "=", "style", "style", ".", "font", ".", "name", "=", "self", ".", "_file", ".", "font_list", "[", "self", ".", "_file", ".", "xf_list", "[", "cell", ".", "xf_index", "]", ".", "font_index", "]", ".", "name", "style", ".", "font", ".", "bold", "=", "self", ".", "_file", ".", "font_list", "[", "self", ".", "_file", ".", "xf_list", "[", "cell", ".", "xf_index", "]", ".", "font_index", "]", ".", "bold", "if", "isinstance", "(", "self", ".", "header", "[", "y", "]", ",", "tuple", ")", ":", "header", "=", "self", ".", "header", "[", "y", "]", "[", "0", "]", "else", ":", "header", "=", "self", ".", "header", "[", "y", "]", "if", "self", ".", "strip", ":", "if", "is_str_or_unicode", "(", "cell", ".", "value", ")", ":", "cell", ".", 
"value", "=", "cell", ".", "value", ".", "strip", "(", ")", "if", "self", ".", "style", ":", "return", "{", "header", ":", "(", "cell", ".", "value", ",", "style", ")", "}", "else", ":", "return", "{", "header", ":", "cell", ".", "value", "}" ]
reads the cell at position x and y; puts the default styles in xlwt
[ "reads", "the", "cell", "at", "position", "x", "and", "y", ";", "puts", "the", "default", "styles", "in", "xlwt" ]
train
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/excel.py#L20-L54
kappius/pyheaderfile
pyheaderfile/excel.py
Xls.write_cell
def write_cell(self, x, y, value, style=None): """ writing style and value in the cell of x and y position """ if isinstance(style, str): style = self.xlwt.easyxf(style) if style: self._sheet.write(x, y, label=value, style=style) else: self._sheet.write(x, y, label=value)
python
def write_cell(self, x, y, value, style=None): """ writing style and value in the cell of x and y position """ if isinstance(style, str): style = self.xlwt.easyxf(style) if style: self._sheet.write(x, y, label=value, style=style) else: self._sheet.write(x, y, label=value)
[ "def", "write_cell", "(", "self", ",", "x", ",", "y", ",", "value", ",", "style", "=", "None", ")", ":", "if", "isinstance", "(", "style", ",", "str", ")", ":", "style", "=", "self", ".", "xlwt", ".", "easyxf", "(", "style", ")", "if", "style", ":", "self", ".", "_sheet", ".", "write", "(", "x", ",", "y", ",", "label", "=", "value", ",", "style", "=", "style", ")", "else", ":", "self", ".", "_sheet", ".", "write", "(", "x", ",", "y", ",", "label", "=", "value", ")" ]
writing style and value in the cell of x and y position
[ "writing", "style", "and", "value", "in", "the", "cell", "of", "x", "and", "y", "position" ]
train
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/excel.py#L56-L65
OLC-Bioinformatics/sipprverse
cgecore/argumentparsing.py
get_string
def get_string(string): """ This function checks if a path was given as string, and tries to read the file and return the string. """ truestring = string if string is not None: if '/' in string: if os.path.isfile(string): try: with open_(string,'r') as f: truestring = ' '.join(line.strip() for line in f) except: pass if truestring.strip() == '': truestring = None return truestring
python
def get_string(string): """ This function checks if a path was given as string, and tries to read the file and return the string. """ truestring = string if string is not None: if '/' in string: if os.path.isfile(string): try: with open_(string,'r') as f: truestring = ' '.join(line.strip() for line in f) except: pass if truestring.strip() == '': truestring = None return truestring
[ "def", "get_string", "(", "string", ")", ":", "truestring", "=", "string", "if", "string", "is", "not", "None", ":", "if", "'/'", "in", "string", ":", "if", "os", ".", "path", ".", "isfile", "(", "string", ")", ":", "try", ":", "with", "open_", "(", "string", ",", "'r'", ")", "as", "f", ":", "truestring", "=", "' '", ".", "join", "(", "line", ".", "strip", "(", ")", "for", "line", "in", "f", ")", "except", ":", "pass", "if", "truestring", ".", "strip", "(", ")", "==", "''", ":", "truestring", "=", "None", "return", "truestring" ]
This function checks if a path was given as string, and tries to read the file and return the string.
[ "This", "function", "checks", "if", "a", "path", "was", "given", "as", "string", "and", "tries", "to", "read", "the", "file", "and", "return", "the", "string", "." ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/argumentparsing.py#L14-L27
OLC-Bioinformatics/sipprverse
cgecore/argumentparsing.py
get_arguments
def get_arguments(options): """ This function handles and validates the wrapper arguments. """ # These the next couple of lines defines the header of the Help output parser = ArgumentParser( formatter_class=RawDescriptionHelpFormatter, usage=("""%(prog)s -------------------------------------------------------------------------------- """), description=(""" Service Wrapper =============== This is the service wrapper script, which is a part of the CGE services. Read the online manual for help. A list of all published services can be found at: cge.cbs.dtu.dk/services """), epilog=(""" -------------------------------------------------------------------------------- """)) #ADDING ARGUMENTS setarg = parser.add_argument #SERVICE SPECIFIC ARGUMENTS if isinstance(options, str): options = [[x for i,x in enumerate(line.split()) if i in [1,2]] for line in options.split('\n') if len(line)>0] for o in options: try: setarg(o[1], type=str, dest=o[0], default=None, help=SUPPRESS) except: None else: for o in options: if o[2] is True: # Handle negative flags setarg(o[0], action="store_false", dest=o[1], default=o[2], help=o[3]) elif o[2] is False: # Handle positive flags setarg(o[0], action="store_true", dest=o[1], default=o[2], help=o[3]) else: help_ = o[3] if o[2] is None else "%s [%s]"%(o[3], '%(default)s') setarg(o[0], type=str, dest=o[1], default=o[2], help=help_) # VALIDATION OF ARGUMENTS args = parser.parse_args() debug.log("ARGS: %s"%args) return args
python
def get_arguments(options): """ This function handles and validates the wrapper arguments. """ # These the next couple of lines defines the header of the Help output parser = ArgumentParser( formatter_class=RawDescriptionHelpFormatter, usage=("""%(prog)s -------------------------------------------------------------------------------- """), description=(""" Service Wrapper =============== This is the service wrapper script, which is a part of the CGE services. Read the online manual for help. A list of all published services can be found at: cge.cbs.dtu.dk/services """), epilog=(""" -------------------------------------------------------------------------------- """)) #ADDING ARGUMENTS setarg = parser.add_argument #SERVICE SPECIFIC ARGUMENTS if isinstance(options, str): options = [[x for i,x in enumerate(line.split()) if i in [1,2]] for line in options.split('\n') if len(line)>0] for o in options: try: setarg(o[1], type=str, dest=o[0], default=None, help=SUPPRESS) except: None else: for o in options: if o[2] is True: # Handle negative flags setarg(o[0], action="store_false", dest=o[1], default=o[2], help=o[3]) elif o[2] is False: # Handle positive flags setarg(o[0], action="store_true", dest=o[1], default=o[2], help=o[3]) else: help_ = o[3] if o[2] is None else "%s [%s]"%(o[3], '%(default)s') setarg(o[0], type=str, dest=o[1], default=o[2], help=help_) # VALIDATION OF ARGUMENTS args = parser.parse_args() debug.log("ARGS: %s"%args) return args
[ "def", "get_arguments", "(", "options", ")", ":", "# These the next couple of lines defines the header of the Help output", "parser", "=", "ArgumentParser", "(", "formatter_class", "=", "RawDescriptionHelpFormatter", ",", "usage", "=", "(", "\"\"\"%(prog)s\n--------------------------------------------------------------------------------\n\"\"\"", ")", ",", "description", "=", "(", "\"\"\"\nService Wrapper\n===============\nThis is the service wrapper script, which is a part of the CGE services.\nRead the online manual for help.\nA list of all published services can be found at:\ncge.cbs.dtu.dk/services\n\n\"\"\"", ")", ",", "epilog", "=", "(", "\"\"\"\n--------------------------------------------------------------------------------\n \"\"\"", ")", ")", "#ADDING ARGUMENTS", "setarg", "=", "parser", ".", "add_argument", "#SERVICE SPECIFIC ARGUMENTS", "if", "isinstance", "(", "options", ",", "str", ")", ":", "options", "=", "[", "[", "x", "for", "i", ",", "x", "in", "enumerate", "(", "line", ".", "split", "(", ")", ")", "if", "i", "in", "[", "1", ",", "2", "]", "]", "for", "line", "in", "options", ".", "split", "(", "'\\n'", ")", "if", "len", "(", "line", ")", ">", "0", "]", "for", "o", "in", "options", ":", "try", ":", "setarg", "(", "o", "[", "1", "]", ",", "type", "=", "str", ",", "dest", "=", "o", "[", "0", "]", ",", "default", "=", "None", ",", "help", "=", "SUPPRESS", ")", "except", ":", "None", "else", ":", "for", "o", "in", "options", ":", "if", "o", "[", "2", "]", "is", "True", ":", "# Handle negative flags", "setarg", "(", "o", "[", "0", "]", ",", "action", "=", "\"store_false\"", ",", "dest", "=", "o", "[", "1", "]", ",", "default", "=", "o", "[", "2", "]", ",", "help", "=", "o", "[", "3", "]", ")", "elif", "o", "[", "2", "]", "is", "False", ":", "# Handle positive flags", "setarg", "(", "o", "[", "0", "]", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "o", "[", "1", "]", ",", "default", "=", "o", "[", "2", "]", ",", "help", "=", "o", "[", "3", 
"]", ")", "else", ":", "help_", "=", "o", "[", "3", "]", "if", "o", "[", "2", "]", "is", "None", "else", "\"%s [%s]\"", "%", "(", "o", "[", "3", "]", ",", "'%(default)s'", ")", "setarg", "(", "o", "[", "0", "]", ",", "type", "=", "str", ",", "dest", "=", "o", "[", "1", "]", ",", "default", "=", "o", "[", "2", "]", ",", "help", "=", "help_", ")", "# VALIDATION OF ARGUMENTS", "args", "=", "parser", ".", "parse_args", "(", ")", "debug", ".", "log", "(", "\"ARGS: %s\"", "%", "args", ")", "return", "args" ]
This function handles and validates the wrapper arguments.
[ "This", "function", "handles", "and", "validates", "the", "wrapper", "arguments", "." ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/argumentparsing.py#L29-L76
OLC-Bioinformatics/sipprverse
cgecore/argumentparsing.py
check_file_type
def check_file_type(files): """ Check whether the input files are in fasta format, reads format or other/mix formats. """ all_are_fasta = True all_are_reads = True all_are_empty = True if sys.version_info < (3, 0): if isinstance(files, (str, unicode)): files = [files] else: if isinstance(files, str): files = [files] for file_ in files: debug.log('Checking file type: %s'%file_) # Check if file is empty if os.stat(file_).st_size == 0: continue else: all_are_empty = False with open_(file_) as f: fc = f.readline()[0] if fc != "@": all_are_reads = False if fc != ">": all_are_fasta = False if all_are_empty: return 'empty' elif all_are_fasta: return 'fasta' elif all_are_reads: return 'fastq' else: return 'other'
python
def check_file_type(files): """ Check whether the input files are in fasta format, reads format or other/mix formats. """ all_are_fasta = True all_are_reads = True all_are_empty = True if sys.version_info < (3, 0): if isinstance(files, (str, unicode)): files = [files] else: if isinstance(files, str): files = [files] for file_ in files: debug.log('Checking file type: %s'%file_) # Check if file is empty if os.stat(file_).st_size == 0: continue else: all_are_empty = False with open_(file_) as f: fc = f.readline()[0] if fc != "@": all_are_reads = False if fc != ">": all_are_fasta = False if all_are_empty: return 'empty' elif all_are_fasta: return 'fasta' elif all_are_reads: return 'fastq' else: return 'other'
[ "def", "check_file_type", "(", "files", ")", ":", "all_are_fasta", "=", "True", "all_are_reads", "=", "True", "all_are_empty", "=", "True", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "if", "isinstance", "(", "files", ",", "(", "str", ",", "unicode", ")", ")", ":", "files", "=", "[", "files", "]", "else", ":", "if", "isinstance", "(", "files", ",", "str", ")", ":", "files", "=", "[", "files", "]", "for", "file_", "in", "files", ":", "debug", ".", "log", "(", "'Checking file type: %s'", "%", "file_", ")", "# Check if file is empty", "if", "os", ".", "stat", "(", "file_", ")", ".", "st_size", "==", "0", ":", "continue", "else", ":", "all_are_empty", "=", "False", "with", "open_", "(", "file_", ")", "as", "f", ":", "fc", "=", "f", ".", "readline", "(", ")", "[", "0", "]", "if", "fc", "!=", "\"@\"", ":", "all_are_reads", "=", "False", "if", "fc", "!=", "\">\"", ":", "all_are_fasta", "=", "False", "if", "all_are_empty", ":", "return", "'empty'", "elif", "all_are_fasta", ":", "return", "'fasta'", "elif", "all_are_reads", ":", "return", "'fastq'", "else", ":", "return", "'other'" ]
Check whether the input files are in fasta format, reads format or other/mix formats.
[ "Check", "whether", "the", "input", "files", "are", "in", "fasta", "format", "reads", "format", "or", "other", "/", "mix", "formats", "." ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/argumentparsing.py#L78-L101
OLC-Bioinformatics/sipprverse
cgecore/argumentparsing.py
make_file_list
def make_file_list(upload_path): """ This function returns list of files in the given dir """ newlist = [] for el in sorted(os.listdir(upload_path)): if ' ' in el: raise Exception('Error: Spaces are not allowed in file names!\n') newlist.append(os.path.normpath(upload_path+'/'+el)) debug.log('InputFiles: %s\n'%newlist) return newlist
python
def make_file_list(upload_path): """ This function returns list of files in the given dir """ newlist = [] for el in sorted(os.listdir(upload_path)): if ' ' in el: raise Exception('Error: Spaces are not allowed in file names!\n') newlist.append(os.path.normpath(upload_path+'/'+el)) debug.log('InputFiles: %s\n'%newlist) return newlist
[ "def", "make_file_list", "(", "upload_path", ")", ":", "newlist", "=", "[", "]", "for", "el", "in", "sorted", "(", "os", ".", "listdir", "(", "upload_path", ")", ")", ":", "if", "' '", "in", "el", ":", "raise", "Exception", "(", "'Error: Spaces are not allowed in file names!\\n'", ")", "newlist", ".", "append", "(", "os", ".", "path", ".", "normpath", "(", "upload_path", "+", "'/'", "+", "el", ")", ")", "debug", ".", "log", "(", "'InputFiles: %s\\n'", "%", "newlist", ")", "return", "newlist" ]
This function returns list of files in the given dir
[ "This", "function", "returns", "list", "of", "files", "in", "the", "given", "dir" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/argumentparsing.py#L103-L111
pyBookshelf/bookshelf
bookshelf/api_v2/rackspace.py
create_server_rackspace
def create_server_rackspace(connection, distribution, disk_name, disk_size, ami, region, key_pair, instance_type, instance_name, tags={}, security_groups=None): """ Creates Rackspace Instance and saves it state in a local json file """ log_yellow("Creating Rackspace instance...") flavor = connection.flavors.find(name=instance_type) image = connection.images.find(name=ami) server = connection.servers.create(name=instance_name, flavor=flavor.id, image=image.id, region=region, availability_zone=region, key_name=key_pair) while server.status == 'BUILD': log_yellow("Waiting for build to finish...") sleep(5) server = connection.servers.get(server.id) # check for errors if server.status != 'ACTIVE': log_red("Error creating rackspace instance") exit(1) # the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address ip_address = server.accessIPv4 if ip_address is None: log_red('No IP address assigned') exit(1) wait_for_ssh(ip_address) log_green('New server with IP address {0}.'.format(ip_address)) return server
python
def create_server_rackspace(connection, distribution, disk_name, disk_size, ami, region, key_pair, instance_type, instance_name, tags={}, security_groups=None): """ Creates Rackspace Instance and saves it state in a local json file """ log_yellow("Creating Rackspace instance...") flavor = connection.flavors.find(name=instance_type) image = connection.images.find(name=ami) server = connection.servers.create(name=instance_name, flavor=flavor.id, image=image.id, region=region, availability_zone=region, key_name=key_pair) while server.status == 'BUILD': log_yellow("Waiting for build to finish...") sleep(5) server = connection.servers.get(server.id) # check for errors if server.status != 'ACTIVE': log_red("Error creating rackspace instance") exit(1) # the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address ip_address = server.accessIPv4 if ip_address is None: log_red('No IP address assigned') exit(1) wait_for_ssh(ip_address) log_green('New server with IP address {0}.'.format(ip_address)) return server
[ "def", "create_server_rackspace", "(", "connection", ",", "distribution", ",", "disk_name", ",", "disk_size", ",", "ami", ",", "region", ",", "key_pair", ",", "instance_type", ",", "instance_name", ",", "tags", "=", "{", "}", ",", "security_groups", "=", "None", ")", ":", "log_yellow", "(", "\"Creating Rackspace instance...\"", ")", "flavor", "=", "connection", ".", "flavors", ".", "find", "(", "name", "=", "instance_type", ")", "image", "=", "connection", ".", "images", ".", "find", "(", "name", "=", "ami", ")", "server", "=", "connection", ".", "servers", ".", "create", "(", "name", "=", "instance_name", ",", "flavor", "=", "flavor", ".", "id", ",", "image", "=", "image", ".", "id", ",", "region", "=", "region", ",", "availability_zone", "=", "region", ",", "key_name", "=", "key_pair", ")", "while", "server", ".", "status", "==", "'BUILD'", ":", "log_yellow", "(", "\"Waiting for build to finish...\"", ")", "sleep", "(", "5", ")", "server", "=", "connection", ".", "servers", ".", "get", "(", "server", ".", "id", ")", "# check for errors", "if", "server", ".", "status", "!=", "'ACTIVE'", ":", "log_red", "(", "\"Error creating rackspace instance\"", ")", "exit", "(", "1", ")", "# the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address", "ip_address", "=", "server", ".", "accessIPv4", "if", "ip_address", "is", "None", ":", "log_red", "(", "'No IP address assigned'", ")", "exit", "(", "1", ")", "wait_for_ssh", "(", "ip_address", ")", "log_green", "(", "'New server with IP address {0}.'", ".", "format", "(", "ip_address", ")", ")", "return", "server" ]
Creates Rackspace Instance and saves it state in a local json file
[ "Creates", "Rackspace", "Instance", "and", "saves", "it", "state", "in", "a", "local", "json", "file" ]
train
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/rackspace.py#L47-L93
pyBookshelf/bookshelf
bookshelf/api_v2/rackspace.py
destroy_rackspace
def destroy_rackspace(connection, region, instance_id): """ terminates the instance """ server = connection.servers.get(instance_id) log_yellow('deleting rackspace instance ...') server.delete() # wait for server to be deleted try: while True: server = connection.servers.get(server.id) log_yellow('waiting for deletion ...') sleep(5) except: pass log_green('The server has been deleted')
python
def destroy_rackspace(connection, region, instance_id): """ terminates the instance """ server = connection.servers.get(instance_id) log_yellow('deleting rackspace instance ...') server.delete() # wait for server to be deleted try: while True: server = connection.servers.get(server.id) log_yellow('waiting for deletion ...') sleep(5) except: pass log_green('The server has been deleted')
[ "def", "destroy_rackspace", "(", "connection", ",", "region", ",", "instance_id", ")", ":", "server", "=", "connection", ".", "servers", ".", "get", "(", "instance_id", ")", "log_yellow", "(", "'deleting rackspace instance ...'", ")", "server", ".", "delete", "(", ")", "# wait for server to be deleted", "try", ":", "while", "True", ":", "server", "=", "connection", ".", "servers", ".", "get", "(", "server", ".", "id", ")", "log_yellow", "(", "'waiting for deletion ...'", ")", "sleep", "(", "5", ")", "except", ":", "pass", "log_green", "(", "'The server has been deleted'", ")" ]
terminates the instance
[ "terminates", "the", "instance" ]
train
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/rackspace.py#L96-L111
pyBookshelf/bookshelf
bookshelf/api_v2/rackspace.py
get_rackspace_info
def get_rackspace_info(connection, server_id): """ queries Rackspace for details about a particular server id """ server = connection.servers.get(server_id) data = {} data['ip_address'] = server.accessIPv4 data['accessIPv4'] = server.accessIPv4 data['accessIPv6'] = server.accessIPv6 data['addresses'] = server.addresses data['created'] = server.created data['flavor'] = server.flavor data['id'] = server.hostId data['human_id'] = server.human_id data['image'] = server.image['id'] data['key_name'] = server.key_name data['state'] = server.status data['metadata'] = server.metadata data['name'] = server.name data['networks'] = server.networks data['tenant_id'] = server.tenant_id data['user_id'] = server.user_id data['cloud_type'] = 'rackspace' return data
python
def get_rackspace_info(connection, server_id): """ queries Rackspace for details about a particular server id """ server = connection.servers.get(server_id) data = {} data['ip_address'] = server.accessIPv4 data['accessIPv4'] = server.accessIPv4 data['accessIPv6'] = server.accessIPv6 data['addresses'] = server.addresses data['created'] = server.created data['flavor'] = server.flavor data['id'] = server.hostId data['human_id'] = server.human_id data['image'] = server.image['id'] data['key_name'] = server.key_name data['state'] = server.status data['metadata'] = server.metadata data['name'] = server.name data['networks'] = server.networks data['tenant_id'] = server.tenant_id data['user_id'] = server.user_id data['cloud_type'] = 'rackspace' return data
[ "def", "get_rackspace_info", "(", "connection", ",", "server_id", ")", ":", "server", "=", "connection", ".", "servers", ".", "get", "(", "server_id", ")", "data", "=", "{", "}", "data", "[", "'ip_address'", "]", "=", "server", ".", "accessIPv4", "data", "[", "'accessIPv4'", "]", "=", "server", ".", "accessIPv4", "data", "[", "'accessIPv6'", "]", "=", "server", ".", "accessIPv6", "data", "[", "'addresses'", "]", "=", "server", ".", "addresses", "data", "[", "'created'", "]", "=", "server", ".", "created", "data", "[", "'flavor'", "]", "=", "server", ".", "flavor", "data", "[", "'id'", "]", "=", "server", ".", "hostId", "data", "[", "'human_id'", "]", "=", "server", ".", "human_id", "data", "[", "'image'", "]", "=", "server", ".", "image", "[", "'id'", "]", "data", "[", "'key_name'", "]", "=", "server", ".", "key_name", "data", "[", "'state'", "]", "=", "server", ".", "status", "data", "[", "'metadata'", "]", "=", "server", ".", "metadata", "data", "[", "'name'", "]", "=", "server", ".", "name", "data", "[", "'networks'", "]", "=", "server", ".", "networks", "data", "[", "'tenant_id'", "]", "=", "server", ".", "tenant_id", "data", "[", "'user_id'", "]", "=", "server", ".", "user_id", "data", "[", "'cloud_type'", "]", "=", "'rackspace'", "return", "data" ]
queries Rackspace for details about a particular server id
[ "queries", "Rackspace", "for", "details", "about", "a", "particular", "server", "id" ]
train
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/rackspace.py#L142-L166
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
date_decoder
def date_decoder(dic): """Add python types decoding. See JsonEncoder""" if '__date__' in dic: try: d = datetime.date(**{c: v for c, v in dic.items() if not c == "__date__"}) except (TypeError, ValueError): raise json.JSONDecodeError("Corrupted date format !", str(dic), 1) elif '__datetime__' in dic: try: d = datetime.datetime(**{c: v for c, v in dic.items() if not c == "__datetime__"}) except (TypeError, ValueError): raise json.JSONDecodeError("Corrupted datetime format !", str(dic), 1) else: return dic return d
python
def date_decoder(dic): """Add python types decoding. See JsonEncoder""" if '__date__' in dic: try: d = datetime.date(**{c: v for c, v in dic.items() if not c == "__date__"}) except (TypeError, ValueError): raise json.JSONDecodeError("Corrupted date format !", str(dic), 1) elif '__datetime__' in dic: try: d = datetime.datetime(**{c: v for c, v in dic.items() if not c == "__datetime__"}) except (TypeError, ValueError): raise json.JSONDecodeError("Corrupted datetime format !", str(dic), 1) else: return dic return d
[ "def", "date_decoder", "(", "dic", ")", ":", "if", "'__date__'", "in", "dic", ":", "try", ":", "d", "=", "datetime", ".", "date", "(", "*", "*", "{", "c", ":", "v", "for", "c", ",", "v", "in", "dic", ".", "items", "(", ")", "if", "not", "c", "==", "\"__date__\"", "}", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "json", ".", "JSONDecodeError", "(", "\"Corrupted date format !\"", ",", "str", "(", "dic", ")", ",", "1", ")", "elif", "'__datetime__'", "in", "dic", ":", "try", ":", "d", "=", "datetime", ".", "datetime", "(", "*", "*", "{", "c", ":", "v", "for", "c", ",", "v", "in", "dic", ".", "items", "(", ")", "if", "not", "c", "==", "\"__datetime__\"", "}", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "json", ".", "JSONDecodeError", "(", "\"Corrupted datetime format !\"", ",", "str", "(", "dic", ")", ",", "1", ")", "else", ":", "return", "dic", "return", "d" ]
Add python types decoding. See JsonEncoder
[ "Add", "python", "types", "decoding", ".", "See", "JsonEncoder" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L31-L45
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
_type_string
def _type_string(label, case=None): """Shortcut for string like fields""" return label, abstractSearch.in_string, lambda s: abstractRender.default(s, case=case), ""
python
def _type_string(label, case=None): """Shortcut for string like fields""" return label, abstractSearch.in_string, lambda s: abstractRender.default(s, case=case), ""
[ "def", "_type_string", "(", "label", ",", "case", "=", "None", ")", ":", "return", "label", ",", "abstractSearch", ".", "in_string", ",", "lambda", "s", ":", "abstractRender", ".", "default", "(", "s", ",", "case", "=", "case", ")", ",", "\"\"" ]
Shortcut for string like fields
[ "Shortcut", "for", "string", "like", "fields" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L301-L303
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
_type_bool
def _type_bool(label,default=False): """Shortcut fot boolean like fields""" return label, abstractSearch.nothing, abstractRender.boolen, default
python
def _type_bool(label,default=False): """Shortcut fot boolean like fields""" return label, abstractSearch.nothing, abstractRender.boolen, default
[ "def", "_type_bool", "(", "label", ",", "default", "=", "False", ")", ":", "return", "label", ",", "abstractSearch", ".", "nothing", ",", "abstractRender", ".", "boolen", ",", "default" ]
Shortcut fot boolean like fields
[ "Shortcut", "fot", "boolean", "like", "fields" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L316-L318
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
abstractSearch.in_string
def in_string(objet, pattern): """ abstractSearch dans une chaine, sans tenir compte de la casse. """ return bool(re.search(pattern, str(objet), flags=re.I)) if objet else False
python
def in_string(objet, pattern): """ abstractSearch dans une chaine, sans tenir compte de la casse. """ return bool(re.search(pattern, str(objet), flags=re.I)) if objet else False
[ "def", "in_string", "(", "objet", ",", "pattern", ")", ":", "return", "bool", "(", "re", ".", "search", "(", "pattern", ",", "str", "(", "objet", ")", ",", "flags", "=", "re", ".", "I", ")", ")", "if", "objet", "else", "False" ]
abstractSearch dans une chaine, sans tenir compte de la casse.
[ "abstractSearch", "dans", "une", "chaine", "sans", "tenir", "compte", "de", "la", "casse", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L199-L201
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
abstractSearch.in_date
def in_date(objet, pattern): """ abstractSearch dans une date datetime.date""" if objet: pattern = re.sub(" ", '', pattern) objet_str = abstractRender.date(objet) return bool(re.search(pattern, objet_str)) return False
python
def in_date(objet, pattern): """ abstractSearch dans une date datetime.date""" if objet: pattern = re.sub(" ", '', pattern) objet_str = abstractRender.date(objet) return bool(re.search(pattern, objet_str)) return False
[ "def", "in_date", "(", "objet", ",", "pattern", ")", ":", "if", "objet", ":", "pattern", "=", "re", ".", "sub", "(", "\" \"", ",", "''", ",", "pattern", ")", "objet_str", "=", "abstractRender", ".", "date", "(", "objet", ")", "return", "bool", "(", "re", ".", "search", "(", "pattern", ",", "objet_str", ")", ")", "return", "False" ]
abstractSearch dans une date datetime.date
[ "abstractSearch", "dans", "une", "date", "datetime", ".", "date" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L204-L210
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
abstractSearch.in_dateheure
def in_dateheure(objet, pattern): """ abstractSearch dans une date-heure datetime.datetime (cf abstractRender.dateheure) """ if objet: pattern = re.sub(" ", '', pattern) objet_str = abstractRender.dateheure(objet) return bool(re.search(pattern, objet_str)) return False
python
def in_dateheure(objet, pattern): """ abstractSearch dans une date-heure datetime.datetime (cf abstractRender.dateheure) """ if objet: pattern = re.sub(" ", '', pattern) objet_str = abstractRender.dateheure(objet) return bool(re.search(pattern, objet_str)) return False
[ "def", "in_dateheure", "(", "objet", ",", "pattern", ")", ":", "if", "objet", ":", "pattern", "=", "re", ".", "sub", "(", "\" \"", ",", "''", ",", "pattern", ")", "objet_str", "=", "abstractRender", ".", "dateheure", "(", "objet", ")", "return", "bool", "(", "re", ".", "search", "(", "pattern", ",", "objet_str", ")", ")", "return", "False" ]
abstractSearch dans une date-heure datetime.datetime (cf abstractRender.dateheure)
[ "abstractSearch", "dans", "une", "date", "-", "heure", "datetime", ".", "datetime", "(", "cf", "abstractRender", ".", "dateheure", ")" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L213-L219
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
abstractSearch.in_telephones
def in_telephones(objet, pattern): """ abstractSearch dans une liste de téléphones.""" objet = objet or [] if pattern == '' or not objet: return False return max(bool(re.search(pattern, t)) for t in objet)
python
def in_telephones(objet, pattern): """ abstractSearch dans une liste de téléphones.""" objet = objet or [] if pattern == '' or not objet: return False return max(bool(re.search(pattern, t)) for t in objet)
[ "def", "in_telephones", "(", "objet", ",", "pattern", ")", ":", "objet", "=", "objet", "or", "[", "]", "if", "pattern", "==", "''", "or", "not", "objet", ":", "return", "False", "return", "max", "(", "bool", "(", "re", ".", "search", "(", "pattern", ",", "t", ")", ")", "for", "t", "in", "objet", ")" ]
abstractSearch dans une liste de téléphones.
[ "abstractSearch", "dans", "une", "liste", "de", "téléphones", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L222-L227
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
abstractRender.date
def date(objet): """ abstractRender d'une date datetime.date""" if objet: return "{}/{}/{}".format(objet.day, objet.month, objet.year) return ""
python
def date(objet): """ abstractRender d'une date datetime.date""" if objet: return "{}/{}/{}".format(objet.day, objet.month, objet.year) return ""
[ "def", "date", "(", "objet", ")", ":", "if", "objet", ":", "return", "\"{}/{}/{}\"", ".", "format", "(", "objet", ".", "day", ",", "objet", ".", "month", ",", "objet", ".", "year", ")", "return", "\"\"" ]
abstractRender d'une date datetime.date
[ "abstractRender", "d", "une", "date", "datetime", ".", "date" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L250-L254
benoitkugler/abstractDataLibrary
pyDLib/Core/formats.py
abstractRender.dateheure
def dateheure(objet): """ abstractRender d'une date-heure datetime.datetime au format JJ/MM/AAAAàHH:mm """ if objet: return "{}/{}/{} à {:02}:{:02}".format(objet.day, objet.month, objet.year, objet.hour, objet.minute) return ""
python
def dateheure(objet): """ abstractRender d'une date-heure datetime.datetime au format JJ/MM/AAAAàHH:mm """ if objet: return "{}/{}/{} à {:02}:{:02}".format(objet.day, objet.month, objet.year, objet.hour, objet.minute) return ""
[ "def", "dateheure", "(", "objet", ")", ":", "if", "objet", ":", "return", "\"{}/{}/{} à {:02}:{:02}\".", "f", "ormat(", "o", "bjet.", "d", "ay,", " ", "bjet.", "m", "onth,", " ", "bjet.", "y", "ear,", " ", "bjet.", "h", "our,", " ", "bjet.", "m", "inute)", "", "return", "\"\"" ]
abstractRender d'une date-heure datetime.datetime au format JJ/MM/AAAAàHH:mm
[ "abstractRender", "d", "une", "date", "-", "heure", "datetime", ".", "datetime", "au", "format", "JJ", "/", "MM", "/", "AAAAàHH", ":", "mm" ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/formats.py#L257-L261
QunarOPS/qg.core
qg/core/gettextutils.py
install
def install(domain, lazy=False): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). :param domain: the translation domain :param lazy: indicates whether or not to install the lazy _() function. The lazy _() introduces a way to do deferred translation of messages by installing a _ that builds Message objects, instead of strings, which can then be lazily translated into any available locale. """ if lazy: # NOTE(mrodden): Lazy gettext functionality. # # The following introduces a deferred way to do translations on # messages in OpenStack. We override the standard _() function # and % (format string) operation to build Message objects that can # later be translated when we have more information. def _lazy_gettext(msg): """Create and return a Message object. Lazy gettext function for a given domain, it is a factory method for a project/module to get a lazy gettext function for its own translation domain (i.e. nova, glance, cinder, etc.) Message encapsulates a string so that we can translate it later when needed. """ return Message(msg, domain=domain) from six import moves moves.builtins.__dict__['_'] = _lazy_gettext else: localedir = '%s_LOCALEDIR' % domain.upper() if six.PY3: gettext.install(domain, localedir=os.environ.get(localedir)) else: gettext.install(domain, localedir=os.environ.get(localedir), unicode=True)
python
def install(domain, lazy=False): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). :param domain: the translation domain :param lazy: indicates whether or not to install the lazy _() function. The lazy _() introduces a way to do deferred translation of messages by installing a _ that builds Message objects, instead of strings, which can then be lazily translated into any available locale. """ if lazy: # NOTE(mrodden): Lazy gettext functionality. # # The following introduces a deferred way to do translations on # messages in OpenStack. We override the standard _() function # and % (format string) operation to build Message objects that can # later be translated when we have more information. def _lazy_gettext(msg): """Create and return a Message object. Lazy gettext function for a given domain, it is a factory method for a project/module to get a lazy gettext function for its own translation domain (i.e. nova, glance, cinder, etc.) Message encapsulates a string so that we can translate it later when needed. """ return Message(msg, domain=domain) from six import moves moves.builtins.__dict__['_'] = _lazy_gettext else: localedir = '%s_LOCALEDIR' % domain.upper() if six.PY3: gettext.install(domain, localedir=os.environ.get(localedir)) else: gettext.install(domain, localedir=os.environ.get(localedir), unicode=True)
[ "def", "install", "(", "domain", ",", "lazy", "=", "False", ")", ":", "if", "lazy", ":", "# NOTE(mrodden): Lazy gettext functionality.", "#", "# The following introduces a deferred way to do translations on", "# messages in OpenStack. We override the standard _() function", "# and % (format string) operation to build Message objects that can", "# later be translated when we have more information.", "def", "_lazy_gettext", "(", "msg", ")", ":", "\"\"\"Create and return a Message object.\n\n Lazy gettext function for a given domain, it is a factory method\n for a project/module to get a lazy gettext function for its own\n translation domain (i.e. nova, glance, cinder, etc.)\n\n Message encapsulates a string so that we can translate\n it later when needed.\n \"\"\"", "return", "Message", "(", "msg", ",", "domain", "=", "domain", ")", "from", "six", "import", "moves", "moves", ".", "builtins", ".", "__dict__", "[", "'_'", "]", "=", "_lazy_gettext", "else", ":", "localedir", "=", "'%s_LOCALEDIR'", "%", "domain", ".", "upper", "(", ")", "if", "six", ".", "PY3", ":", "gettext", ".", "install", "(", "domain", ",", "localedir", "=", "os", ".", "environ", ".", "get", "(", "localedir", ")", ")", "else", ":", "gettext", ".", "install", "(", "domain", ",", "localedir", "=", "os", ".", "environ", ".", "get", "(", "localedir", ")", ",", "unicode", "=", "True", ")" ]
Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). :param domain: the translation domain :param lazy: indicates whether or not to install the lazy _() function. The lazy _() introduces a way to do deferred translation of messages by installing a _ that builds Message objects, instead of strings, which can then be lazily translated into any available locale.
[ "Install", "a", "_", "()", "function", "using", "the", "given", "translation", "domain", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/gettextutils.py#L83-L130
QunarOPS/qg.core
qg/core/gettextutils.py
get_available_languages
def get_available_languages(domain): """Lists the available languages for the given translation domain. :param domain: the domain to get languages for """ if domain in _AVAILABLE_LANGUAGES: return copy.copy(_AVAILABLE_LANGUAGES[domain]) localedir = '%s_LOCALEDIR' % domain.upper() find = lambda x: gettext.find(domain, localedir=os.environ.get(localedir), languages=[x]) # NOTE(mrodden): en_US should always be available (and first in case # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was # renamed to locale_identifiers() in >=1.0, the requirements master list # requires >=0.9.6, uncapped, so defensively work with both. We can remove # this check when the master list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() for i in locale_identifiers: if find(i) is not None: language_list.append(i) # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they # are perfectly legitimate locales: # https://github.com/mitsuhiko/babel/issues/37 # In Babel 1.3 they fixed the bug and they support these locales, but # they are still not explicitly "listed" by locale_identifiers(). # That is why we add the locales here explicitly if necessary so that # they are listed as supported. aliases = {'zh': 'zh_CN', 'zh_Hant_HK': 'zh_HK', 'zh_Hant': 'zh_TW', 'fil': 'tl_PH'} for (locale_, alias) in six.iteritems(aliases): if locale_ in language_list and alias not in language_list: language_list.append(alias) _AVAILABLE_LANGUAGES[domain] = language_list return copy.copy(language_list)
python
def get_available_languages(domain): """Lists the available languages for the given translation domain. :param domain: the domain to get languages for """ if domain in _AVAILABLE_LANGUAGES: return copy.copy(_AVAILABLE_LANGUAGES[domain]) localedir = '%s_LOCALEDIR' % domain.upper() find = lambda x: gettext.find(domain, localedir=os.environ.get(localedir), languages=[x]) # NOTE(mrodden): en_US should always be available (and first in case # order matters) since our in-line message strings are en_US language_list = ['en_US'] # NOTE(luisg): Babel <1.0 used a function called list(), which was # renamed to locale_identifiers() in >=1.0, the requirements master list # requires >=0.9.6, uncapped, so defensively work with both. We can remove # this check when the master list updates to >=1.0, and update all projects list_identifiers = (getattr(localedata, 'list', None) or getattr(localedata, 'locale_identifiers')) locale_identifiers = list_identifiers() for i in locale_identifiers: if find(i) is not None: language_list.append(i) # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they # are perfectly legitimate locales: # https://github.com/mitsuhiko/babel/issues/37 # In Babel 1.3 they fixed the bug and they support these locales, but # they are still not explicitly "listed" by locale_identifiers(). # That is why we add the locales here explicitly if necessary so that # they are listed as supported. aliases = {'zh': 'zh_CN', 'zh_Hant_HK': 'zh_HK', 'zh_Hant': 'zh_TW', 'fil': 'tl_PH'} for (locale_, alias) in six.iteritems(aliases): if locale_ in language_list and alias not in language_list: language_list.append(alias) _AVAILABLE_LANGUAGES[domain] = language_list return copy.copy(language_list)
[ "def", "get_available_languages", "(", "domain", ")", ":", "if", "domain", "in", "_AVAILABLE_LANGUAGES", ":", "return", "copy", ".", "copy", "(", "_AVAILABLE_LANGUAGES", "[", "domain", "]", ")", "localedir", "=", "'%s_LOCALEDIR'", "%", "domain", ".", "upper", "(", ")", "find", "=", "lambda", "x", ":", "gettext", ".", "find", "(", "domain", ",", "localedir", "=", "os", ".", "environ", ".", "get", "(", "localedir", ")", ",", "languages", "=", "[", "x", "]", ")", "# NOTE(mrodden): en_US should always be available (and first in case", "# order matters) since our in-line message strings are en_US", "language_list", "=", "[", "'en_US'", "]", "# NOTE(luisg): Babel <1.0 used a function called list(), which was", "# renamed to locale_identifiers() in >=1.0, the requirements master list", "# requires >=0.9.6, uncapped, so defensively work with both. We can remove", "# this check when the master list updates to >=1.0, and update all projects", "list_identifiers", "=", "(", "getattr", "(", "localedata", ",", "'list'", ",", "None", ")", "or", "getattr", "(", "localedata", ",", "'locale_identifiers'", ")", ")", "locale_identifiers", "=", "list_identifiers", "(", ")", "for", "i", "in", "locale_identifiers", ":", "if", "find", "(", "i", ")", "is", "not", "None", ":", "language_list", ".", "append", "(", "i", ")", "# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported", "# locales (e.g. 
'zh_CN', and 'zh_TW') aren't supported even though they", "# are perfectly legitimate locales:", "# https://github.com/mitsuhiko/babel/issues/37", "# In Babel 1.3 they fixed the bug and they support these locales, but", "# they are still not explicitly \"listed\" by locale_identifiers().", "# That is why we add the locales here explicitly if necessary so that", "# they are listed as supported.", "aliases", "=", "{", "'zh'", ":", "'zh_CN'", ",", "'zh_Hant_HK'", ":", "'zh_HK'", ",", "'zh_Hant'", ":", "'zh_TW'", ",", "'fil'", ":", "'tl_PH'", "}", "for", "(", "locale_", ",", "alias", ")", "in", "six", ".", "iteritems", "(", "aliases", ")", ":", "if", "locale_", "in", "language_list", "and", "alias", "not", "in", "language_list", ":", "language_list", ".", "append", "(", "alias", ")", "_AVAILABLE_LANGUAGES", "[", "domain", "]", "=", "language_list", "return", "copy", ".", "copy", "(", "language_list", ")" ]
Lists the available languages for the given translation domain. :param domain: the domain to get languages for
[ "Lists", "the", "available", "languages", "for", "the", "given", "translation", "domain", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/gettextutils.py#L298-L343
QunarOPS/qg.core
qg/core/gettextutils.py
translate
def translate(obj, desired_locale=None): """Gets the translated unicode representation of the given object. If the object is not translatable it is returned as-is. If the locale is None the object is translated to the system locale. :param obj: the object to translate :param desired_locale: the locale to translate the message to, if None the default system locale will be used :returns: the translated object in unicode, or the original object if it could not be translated """ message = obj if not isinstance(message, Message): # If the object to translate is not already translatable, # let's first get its unicode representation message = six.text_type(obj) if isinstance(message, Message): # Even after unicoding() we still need to check if we are # running with translatable unicode before translating return message.translate(desired_locale) return obj
python
def translate(obj, desired_locale=None): """Gets the translated unicode representation of the given object. If the object is not translatable it is returned as-is. If the locale is None the object is translated to the system locale. :param obj: the object to translate :param desired_locale: the locale to translate the message to, if None the default system locale will be used :returns: the translated object in unicode, or the original object if it could not be translated """ message = obj if not isinstance(message, Message): # If the object to translate is not already translatable, # let's first get its unicode representation message = six.text_type(obj) if isinstance(message, Message): # Even after unicoding() we still need to check if we are # running with translatable unicode before translating return message.translate(desired_locale) return obj
[ "def", "translate", "(", "obj", ",", "desired_locale", "=", "None", ")", ":", "message", "=", "obj", "if", "not", "isinstance", "(", "message", ",", "Message", ")", ":", "# If the object to translate is not already translatable,", "# let's first get its unicode representation", "message", "=", "six", ".", "text_type", "(", "obj", ")", "if", "isinstance", "(", "message", ",", "Message", ")", ":", "# Even after unicoding() we still need to check if we are", "# running with translatable unicode before translating", "return", "message", ".", "translate", "(", "desired_locale", ")", "return", "obj" ]
Gets the translated unicode representation of the given object. If the object is not translatable it is returned as-is. If the locale is None the object is translated to the system locale. :param obj: the object to translate :param desired_locale: the locale to translate the message to, if None the default system locale will be used :returns: the translated object in unicode, or the original object if it could not be translated
[ "Gets", "the", "translated", "unicode", "representation", "of", "the", "given", "object", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/gettextutils.py#L346-L367
QunarOPS/qg.core
qg/core/gettextutils.py
_translate_args
def _translate_args(args, desired_locale=None): """Translates all the translatable elements of the given arguments object. This method is used for translating the translatable values in method arguments which include values of tuples or dictionaries. If the object is not a tuple or a dictionary the object itself is translated if it is translatable. If the locale is None the object is translated to the system locale. :param args: the args to translate :param desired_locale: the locale to translate the args to, if None the default system locale will be used :returns: a new args object with the translated contents of the original """ if isinstance(args, tuple): return tuple(translate(v, desired_locale) for v in args) if isinstance(args, dict): translated_dict = {} for (k, v) in six.iteritems(args): translated_v = translate(v, desired_locale) translated_dict[k] = translated_v return translated_dict return translate(args, desired_locale)
python
def _translate_args(args, desired_locale=None): """Translates all the translatable elements of the given arguments object. This method is used for translating the translatable values in method arguments which include values of tuples or dictionaries. If the object is not a tuple or a dictionary the object itself is translated if it is translatable. If the locale is None the object is translated to the system locale. :param args: the args to translate :param desired_locale: the locale to translate the args to, if None the default system locale will be used :returns: a new args object with the translated contents of the original """ if isinstance(args, tuple): return tuple(translate(v, desired_locale) for v in args) if isinstance(args, dict): translated_dict = {} for (k, v) in six.iteritems(args): translated_v = translate(v, desired_locale) translated_dict[k] = translated_v return translated_dict return translate(args, desired_locale)
[ "def", "_translate_args", "(", "args", ",", "desired_locale", "=", "None", ")", ":", "if", "isinstance", "(", "args", ",", "tuple", ")", ":", "return", "tuple", "(", "translate", "(", "v", ",", "desired_locale", ")", "for", "v", "in", "args", ")", "if", "isinstance", "(", "args", ",", "dict", ")", ":", "translated_dict", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "args", ")", ":", "translated_v", "=", "translate", "(", "v", ",", "desired_locale", ")", "translated_dict", "[", "k", "]", "=", "translated_v", "return", "translated_dict", "return", "translate", "(", "args", ",", "desired_locale", ")" ]
Translates all the translatable elements of the given arguments object. This method is used for translating the translatable values in method arguments which include values of tuples or dictionaries. If the object is not a tuple or a dictionary the object itself is translated if it is translatable. If the locale is None the object is translated to the system locale. :param args: the args to translate :param desired_locale: the locale to translate the args to, if None the default system locale will be used :returns: a new args object with the translated contents of the original
[ "Translates", "all", "the", "translatable", "elements", "of", "the", "given", "arguments", "object", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/gettextutils.py#L370-L393
QunarOPS/qg.core
qg/core/gettextutils.py
Message.translate
def translate(self, desired_locale=None): """Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode """ translated_message = Message._translate_msgid(self.msgid, self.domain, desired_locale) if self.params is None: # No need for more translation return translated_message # This Message object may have been formatted with one or more # Message objects as substitution arguments, given either as a single # argument, part of a tuple, or as one or more values in a dictionary. # When translating this Message we need to translate those Messages too translated_params = _translate_args(self.params, desired_locale) translated_message = translated_message % translated_params return translated_message
python
def translate(self, desired_locale=None): """Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode """ translated_message = Message._translate_msgid(self.msgid, self.domain, desired_locale) if self.params is None: # No need for more translation return translated_message # This Message object may have been formatted with one or more # Message objects as substitution arguments, given either as a single # argument, part of a tuple, or as one or more values in a dictionary. # When translating this Message we need to translate those Messages too translated_params = _translate_args(self.params, desired_locale) translated_message = translated_message % translated_params return translated_message
[ "def", "translate", "(", "self", ",", "desired_locale", "=", "None", ")", ":", "translated_message", "=", "Message", ".", "_translate_msgid", "(", "self", ".", "msgid", ",", "self", ".", "domain", ",", "desired_locale", ")", "if", "self", ".", "params", "is", "None", ":", "# No need for more translation", "return", "translated_message", "# This Message object may have been formatted with one or more", "# Message objects as substitution arguments, given either as a single", "# argument, part of a tuple, or as one or more values in a dictionary.", "# When translating this Message we need to translate those Messages too", "translated_params", "=", "_translate_args", "(", "self", ".", "params", ",", "desired_locale", ")", "translated_message", "=", "translated_message", "%", "translated_params", "return", "translated_message" ]
Translate this message to the desired locale. :param desired_locale: The desired locale to translate the message to, if no locale is provided the message will be translated to the system's default locale. :returns: the translated message in unicode
[ "Translate", "this", "message", "to", "the", "desired", "locale", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/gettextutils.py#L163-L188
QunarOPS/qg.core
qg/core/gettextutils.py
Message._sanitize_mod_params
def _sanitize_mod_params(self, other): """Sanitize the object being modded with this Message. - Add support for modding 'None' so translation supports it - Trim the modded object, which can be a large dictionary, to only those keys that would actually be used in a translation - Snapshot the object being modded, in case the message is translated, it will be used as it was when the Message was created """ if other is None: params = (other,) elif isinstance(other, dict): params = self._trim_dictionary_parameters(other) else: params = self._copy_param(other) return params
python
def _sanitize_mod_params(self, other): """Sanitize the object being modded with this Message. - Add support for modding 'None' so translation supports it - Trim the modded object, which can be a large dictionary, to only those keys that would actually be used in a translation - Snapshot the object being modded, in case the message is translated, it will be used as it was when the Message was created """ if other is None: params = (other,) elif isinstance(other, dict): params = self._trim_dictionary_parameters(other) else: params = self._copy_param(other) return params
[ "def", "_sanitize_mod_params", "(", "self", ",", "other", ")", ":", "if", "other", "is", "None", ":", "params", "=", "(", "other", ",", ")", "elif", "isinstance", "(", "other", ",", "dict", ")", ":", "params", "=", "self", ".", "_trim_dictionary_parameters", "(", "other", ")", "else", ":", "params", "=", "self", ".", "_copy_param", "(", "other", ")", "return", "params" ]
Sanitize the object being modded with this Message. - Add support for modding 'None' so translation supports it - Trim the modded object, which can be a large dictionary, to only those keys that would actually be used in a translation - Snapshot the object being modded, in case the message is translated, it will be used as it was when the Message was created
[ "Sanitize", "the", "object", "being", "modded", "with", "this", "Message", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/gettextutils.py#L225-L240
QunarOPS/qg.core
qg/core/gettextutils.py
Message._trim_dictionary_parameters
def _trim_dictionary_parameters(self, dict_param): """Return a dict that only has matching entries in the msgid.""" # NOTE(luisg): Here we trim down the dictionary passed as parameters # to avoid carrying a lot of unnecessary weight around in the message # object, for example if someone passes in Message() % locals() but # only some params are used, and additionally we prevent errors for # non-deepcopyable objects by unicoding() them. # Look for %(param) keys in msgid; # Skip %% and deal with the case where % is first character on the line keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) # If we don't find any %(param) keys but have a %s if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): # Apparently the full dictionary is the parameter params = self._copy_param(dict_param) else: params = {} # Save our existing parameters as defaults to protect # ourselves from losing values if we are called through an # (erroneous) chain that builds a valid Message with # arguments, and then does something like "msg % kwds" # where kwds is an empty dictionary. src = {} if isinstance(self.params, dict): src.update(self.params) src.update(dict_param) for key in keys: params[key] = self._copy_param(src[key]) return params
python
def _trim_dictionary_parameters(self, dict_param): """Return a dict that only has matching entries in the msgid.""" # NOTE(luisg): Here we trim down the dictionary passed as parameters # to avoid carrying a lot of unnecessary weight around in the message # object, for example if someone passes in Message() % locals() but # only some params are used, and additionally we prevent errors for # non-deepcopyable objects by unicoding() them. # Look for %(param) keys in msgid; # Skip %% and deal with the case where % is first character on the line keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) # If we don't find any %(param) keys but have a %s if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): # Apparently the full dictionary is the parameter params = self._copy_param(dict_param) else: params = {} # Save our existing parameters as defaults to protect # ourselves from losing values if we are called through an # (erroneous) chain that builds a valid Message with # arguments, and then does something like "msg % kwds" # where kwds is an empty dictionary. src = {} if isinstance(self.params, dict): src.update(self.params) src.update(dict_param) for key in keys: params[key] = self._copy_param(src[key]) return params
[ "def", "_trim_dictionary_parameters", "(", "self", ",", "dict_param", ")", ":", "# NOTE(luisg): Here we trim down the dictionary passed as parameters", "# to avoid carrying a lot of unnecessary weight around in the message", "# object, for example if someone passes in Message() % locals() but", "# only some params are used, and additionally we prevent errors for", "# non-deepcopyable objects by unicoding() them.", "# Look for %(param) keys in msgid;", "# Skip %% and deal with the case where % is first character on the line", "keys", "=", "re", ".", "findall", "(", "'(?:[^%]|^)?%\\((\\w*)\\)[a-z]'", ",", "self", ".", "msgid", ")", "# If we don't find any %(param) keys but have a %s", "if", "not", "keys", "and", "re", ".", "findall", "(", "'(?:[^%]|^)%[a-z]'", ",", "self", ".", "msgid", ")", ":", "# Apparently the full dictionary is the parameter", "params", "=", "self", ".", "_copy_param", "(", "dict_param", ")", "else", ":", "params", "=", "{", "}", "# Save our existing parameters as defaults to protect", "# ourselves from losing values if we are called through an", "# (erroneous) chain that builds a valid Message with", "# arguments, and then does something like \"msg % kwds\"", "# where kwds is an empty dictionary.", "src", "=", "{", "}", "if", "isinstance", "(", "self", ".", "params", ",", "dict", ")", ":", "src", ".", "update", "(", "self", ".", "params", ")", "src", ".", "update", "(", "dict_param", ")", "for", "key", "in", "keys", ":", "params", "[", "key", "]", "=", "self", ".", "_copy_param", "(", "src", "[", "key", "]", ")", "return", "params" ]
Return a dict that only has matching entries in the msgid.
[ "Return", "a", "dict", "that", "only", "has", "matching", "entries", "in", "the", "msgid", "." ]
train
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/gettextutils.py#L242-L272
OnroerendErfgoed/pyramid_urireferencer
pyramid_urireferencer/renderers.py
registry_adapter
def registry_adapter(obj, request): """ Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json. :param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered. :rtype: :class:`dict` """ return { 'query_uri': obj.query_uri, 'success': obj.success, 'has_references': obj.has_references, 'count': obj.count, 'applications': [{ 'title': a.title, 'uri': a.uri, 'service_url': a.service_url, 'success': a.success, 'has_references': a.has_references, 'count': a.count, 'items': [{ 'uri': i.uri, 'title': i.title } for i in a.items] if a.items is not None else None } for a in obj.applications] if obj.applications is not None else None }
python
def registry_adapter(obj, request): """ Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json. :param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered. :rtype: :class:`dict` """ return { 'query_uri': obj.query_uri, 'success': obj.success, 'has_references': obj.has_references, 'count': obj.count, 'applications': [{ 'title': a.title, 'uri': a.uri, 'service_url': a.service_url, 'success': a.success, 'has_references': a.has_references, 'count': a.count, 'items': [{ 'uri': i.uri, 'title': i.title } for i in a.items] if a.items is not None else None } for a in obj.applications] if obj.applications is not None else None }
[ "def", "registry_adapter", "(", "obj", ",", "request", ")", ":", "return", "{", "'query_uri'", ":", "obj", ".", "query_uri", ",", "'success'", ":", "obj", ".", "success", ",", "'has_references'", ":", "obj", ".", "has_references", ",", "'count'", ":", "obj", ".", "count", ",", "'applications'", ":", "[", "{", "'title'", ":", "a", ".", "title", ",", "'uri'", ":", "a", ".", "uri", ",", "'service_url'", ":", "a", ".", "service_url", ",", "'success'", ":", "a", ".", "success", ",", "'has_references'", ":", "a", ".", "has_references", ",", "'count'", ":", "a", ".", "count", ",", "'items'", ":", "[", "{", "'uri'", ":", "i", ".", "uri", ",", "'title'", ":", "i", ".", "title", "}", "for", "i", "in", "a", ".", "items", "]", "if", "a", ".", "items", "is", "not", "None", "else", "None", "}", "for", "a", "in", "obj", ".", "applications", "]", "if", "obj", ".", "applications", "is", "not", "None", "else", "None", "}" ]
Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json. :param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered. :rtype: :class:`dict`
[ "Adapter", "for", "rendering", "a", ":", "class", ":", "pyramid_urireferencer", ".", "models", ".", "RegistryResponse", "to", "json", "." ]
train
https://github.com/OnroerendErfgoed/pyramid_urireferencer/blob/c6ee4ba863e32ced304b9cf00f3f5b450757a29a/pyramid_urireferencer/renderers.py#L14-L38
OnroerendErfgoed/pyramid_urireferencer
pyramid_urireferencer/renderers.py
application_adapter
def application_adapter(obj, request): """ Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json. :param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered. :rtype: :class:`dict` """ return { 'title': obj.title, 'uri': obj.uri, 'service_url': obj.service_url, 'success': obj.success, 'has_references': obj.has_references, 'count': obj.count, 'items': [{ 'uri': i.uri, 'title': i.title } for i in obj.items] if obj.items is not None else None }
python
def application_adapter(obj, request): """ Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json. :param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered. :rtype: :class:`dict` """ return { 'title': obj.title, 'uri': obj.uri, 'service_url': obj.service_url, 'success': obj.success, 'has_references': obj.has_references, 'count': obj.count, 'items': [{ 'uri': i.uri, 'title': i.title } for i in obj.items] if obj.items is not None else None }
[ "def", "application_adapter", "(", "obj", ",", "request", ")", ":", "return", "{", "'title'", ":", "obj", ".", "title", ",", "'uri'", ":", "obj", ".", "uri", ",", "'service_url'", ":", "obj", ".", "service_url", ",", "'success'", ":", "obj", ".", "success", ",", "'has_references'", ":", "obj", ".", "has_references", ",", "'count'", ":", "obj", ".", "count", ",", "'items'", ":", "[", "{", "'uri'", ":", "i", ".", "uri", ",", "'title'", ":", "i", ".", "title", "}", "for", "i", "in", "obj", ".", "items", "]", "if", "obj", ".", "items", "is", "not", "None", "else", "None", "}" ]
Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json. :param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered. :rtype: :class:`dict`
[ "Adapter", "for", "rendering", "a", ":", "class", ":", "pyramid_urireferencer", ".", "models", ".", "ApplicationResponse", "to", "json", "." ]
train
https://github.com/OnroerendErfgoed/pyramid_urireferencer/blob/c6ee4ba863e32ced304b9cf00f3f5b450757a29a/pyramid_urireferencer/renderers.py#L41-L59
tsnaomi/finnsyll
finnsyll/prev/phonology.py
replace_umlauts
def replace_umlauts(word, put_back=False): # use translate() '''If put_back is True, put in umlauts; else, take them out!''' if put_back: word = word.replace('A', 'ä') word = word.replace('O', 'ö') else: word = word.replace('ä', 'A').replace('\xc3\xa4', 'A') word = word.replace('ö', 'O').replace('\xc3\xb6', 'O') return word
python
def replace_umlauts(word, put_back=False): # use translate() '''If put_back is True, put in umlauts; else, take them out!''' if put_back: word = word.replace('A', 'ä') word = word.replace('O', 'ö') else: word = word.replace('ä', 'A').replace('\xc3\xa4', 'A') word = word.replace('ö', 'O').replace('\xc3\xb6', 'O') return word
[ "def", "replace_umlauts", "(", "word", ",", "put_back", "=", "False", ")", ":", "# use translate()", "if", "put_back", ":", "word", "=", "word", ".", "replace", "(", "'A'", ",", "'ä')", "", "word", "=", "word", ".", "replace", "(", "'O'", ",", "'ö')", "", "else", ":", "word", "=", "word", ".", "replace", "(", "'ä',", " ", "A')", ".", "r", "eplace(", "'", "\\xc3\\xa4',", " ", "A')", "", "word", "=", "word", ".", "replace", "(", "'ö',", " ", "O')", ".", "r", "eplace(", "'", "\\xc3\\xb6',", " ", "O')", "", "return", "word" ]
If put_back is True, put in umlauts; else, take them out!
[ "If", "put_back", "is", "True", "put", "in", "umlauts", ";", "else", "take", "them", "out!" ]
train
https://github.com/tsnaomi/finnsyll/blob/6a42740311688c946a636a3e2304866c7aa041b3/finnsyll/prev/phonology.py#L203-L213
OLC-Bioinformatics/sipprverse
cgecore/blaster/blaster.py
Blaster.get_query_align
def get_query_align(hit, contig): """ Function for extracting extra seqeunce data to the query alignment if the full reference length are not covered """ # Getting data needed to extract sequences query_seq = hit['query_string'] homo_seq = hit['homo_string'] sbjct_start = int(hit['sbjct_start']) sbjct_end = int(hit['sbjct_end']) query_start = int(hit['query_start']) query_end = int(hit['query_end']) length = int(hit['sbjct_length']) # If the alignment doesn't start at the first position data is # added to the begnning if sbjct_start != 1: missing = sbjct_start - 1 if(query_start >= missing and hit['strand'] != 1 or hit['strand'] == 1 and missing <= (len(contig) - query_end)): # Getting the query sequence. # If the the hit is on the other strand the characters # are reversed. if hit['strand'] == 1: start_pos = query_end end_pos = query_end + missing chars = contig[start_pos:end_pos] chars = Blaster.reversecomplement(chars) else: start_pos = query_start - missing - 1 end_pos = query_start - 1 chars = contig[start_pos:end_pos] query_seq = chars + str(query_seq) else: # Getting the query sequence. # If the the hit is on the other strand the characters # are reversed. if hit['strand'] == 1: if query_end == len(contig): query_seq = "-" * missing + str(query_seq) else: start_pos = query_end chars = contig[start_pos:] chars = Blaster.reversecomplement(chars) query_seq = ("-" * (missing - len(chars)) + chars + str(query_seq)) elif query_start < 3: query_seq = "-" * missing + str(query_seq) else: end_pos = query_start - 2 chars = contig[0:end_pos] query_seq = ("-" * (missing - len(chars)) + chars + str(query_seq)) # Adding to the homo sequence spaces = " " * missing homo_seq = str(spaces) + str(homo_seq) # If the alignment dosen't end and the last position data is # added to the end if sbjct_end < length: missing = length - sbjct_end if(missing <= (len(contig) - query_end) and hit['strand'] != 1 or hit['strand'] == 1 and query_start >= missing): # Getting the query sequence. 
# If the the hit is on the other strand the characters # are reversed. if hit['strand'] == 1: start_pos = query_start - missing - 1 end_pos = query_start - 1 chars = contig[start_pos:end_pos] chars = Blaster.reversecomplement(chars) else: start_pos = query_end end_pos = query_end + missing chars = contig[start_pos:end_pos] query_seq = query_seq + chars else: # If the hit is on the other strand the characters are reversed if hit['strand'] == 1: if query_start < 3: query_seq = query_seq + "-" * missing else: end_pos = query_start - 2 chars = contig[0:end_pos] chars = Blaster.reversecomplement(chars) query_seq = (query_seq + chars + "-" * (missing - len(chars))) elif query_end == len(contig): query_seq = query_seq + "-" * missing else: start_pos = query_end chars = contig[start_pos:] query_seq = query_seq + chars + "-" * (missing - len(chars)) # Adding to the homo sequence spaces = " " * int(missing) homo_seq = str(homo_seq) + str(spaces) return query_seq, homo_seq
python
def get_query_align(hit, contig): """ Function for extracting extra seqeunce data to the query alignment if the full reference length are not covered """ # Getting data needed to extract sequences query_seq = hit['query_string'] homo_seq = hit['homo_string'] sbjct_start = int(hit['sbjct_start']) sbjct_end = int(hit['sbjct_end']) query_start = int(hit['query_start']) query_end = int(hit['query_end']) length = int(hit['sbjct_length']) # If the alignment doesn't start at the first position data is # added to the begnning if sbjct_start != 1: missing = sbjct_start - 1 if(query_start >= missing and hit['strand'] != 1 or hit['strand'] == 1 and missing <= (len(contig) - query_end)): # Getting the query sequence. # If the the hit is on the other strand the characters # are reversed. if hit['strand'] == 1: start_pos = query_end end_pos = query_end + missing chars = contig[start_pos:end_pos] chars = Blaster.reversecomplement(chars) else: start_pos = query_start - missing - 1 end_pos = query_start - 1 chars = contig[start_pos:end_pos] query_seq = chars + str(query_seq) else: # Getting the query sequence. # If the the hit is on the other strand the characters # are reversed. if hit['strand'] == 1: if query_end == len(contig): query_seq = "-" * missing + str(query_seq) else: start_pos = query_end chars = contig[start_pos:] chars = Blaster.reversecomplement(chars) query_seq = ("-" * (missing - len(chars)) + chars + str(query_seq)) elif query_start < 3: query_seq = "-" * missing + str(query_seq) else: end_pos = query_start - 2 chars = contig[0:end_pos] query_seq = ("-" * (missing - len(chars)) + chars + str(query_seq)) # Adding to the homo sequence spaces = " " * missing homo_seq = str(spaces) + str(homo_seq) # If the alignment dosen't end and the last position data is # added to the end if sbjct_end < length: missing = length - sbjct_end if(missing <= (len(contig) - query_end) and hit['strand'] != 1 or hit['strand'] == 1 and query_start >= missing): # Getting the query sequence. 
# If the the hit is on the other strand the characters # are reversed. if hit['strand'] == 1: start_pos = query_start - missing - 1 end_pos = query_start - 1 chars = contig[start_pos:end_pos] chars = Blaster.reversecomplement(chars) else: start_pos = query_end end_pos = query_end + missing chars = contig[start_pos:end_pos] query_seq = query_seq + chars else: # If the hit is on the other strand the characters are reversed if hit['strand'] == 1: if query_start < 3: query_seq = query_seq + "-" * missing else: end_pos = query_start - 2 chars = contig[0:end_pos] chars = Blaster.reversecomplement(chars) query_seq = (query_seq + chars + "-" * (missing - len(chars))) elif query_end == len(contig): query_seq = query_seq + "-" * missing else: start_pos = query_end chars = contig[start_pos:] query_seq = query_seq + chars + "-" * (missing - len(chars)) # Adding to the homo sequence spaces = " " * int(missing) homo_seq = str(homo_seq) + str(spaces) return query_seq, homo_seq
[ "def", "get_query_align", "(", "hit", ",", "contig", ")", ":", "# Getting data needed to extract sequences", "query_seq", "=", "hit", "[", "'query_string'", "]", "homo_seq", "=", "hit", "[", "'homo_string'", "]", "sbjct_start", "=", "int", "(", "hit", "[", "'sbjct_start'", "]", ")", "sbjct_end", "=", "int", "(", "hit", "[", "'sbjct_end'", "]", ")", "query_start", "=", "int", "(", "hit", "[", "'query_start'", "]", ")", "query_end", "=", "int", "(", "hit", "[", "'query_end'", "]", ")", "length", "=", "int", "(", "hit", "[", "'sbjct_length'", "]", ")", "# If the alignment doesn't start at the first position data is", "# added to the begnning", "if", "sbjct_start", "!=", "1", ":", "missing", "=", "sbjct_start", "-", "1", "if", "(", "query_start", ">=", "missing", "and", "hit", "[", "'strand'", "]", "!=", "1", "or", "hit", "[", "'strand'", "]", "==", "1", "and", "missing", "<=", "(", "len", "(", "contig", ")", "-", "query_end", ")", ")", ":", "# Getting the query sequence.", "# If the the hit is on the other strand the characters", "# are reversed.", "if", "hit", "[", "'strand'", "]", "==", "1", ":", "start_pos", "=", "query_end", "end_pos", "=", "query_end", "+", "missing", "chars", "=", "contig", "[", "start_pos", ":", "end_pos", "]", "chars", "=", "Blaster", ".", "reversecomplement", "(", "chars", ")", "else", ":", "start_pos", "=", "query_start", "-", "missing", "-", "1", "end_pos", "=", "query_start", "-", "1", "chars", "=", "contig", "[", "start_pos", ":", "end_pos", "]", "query_seq", "=", "chars", "+", "str", "(", "query_seq", ")", "else", ":", "# Getting the query sequence.", "# If the the hit is on the other strand the characters", "# are reversed.", "if", "hit", "[", "'strand'", "]", "==", "1", ":", "if", "query_end", "==", "len", "(", "contig", ")", ":", "query_seq", "=", "\"-\"", "*", "missing", "+", "str", "(", "query_seq", ")", "else", ":", "start_pos", "=", "query_end", "chars", "=", "contig", "[", "start_pos", ":", "]", "chars", "=", "Blaster", ".", 
"reversecomplement", "(", "chars", ")", "query_seq", "=", "(", "\"-\"", "*", "(", "missing", "-", "len", "(", "chars", ")", ")", "+", "chars", "+", "str", "(", "query_seq", ")", ")", "elif", "query_start", "<", "3", ":", "query_seq", "=", "\"-\"", "*", "missing", "+", "str", "(", "query_seq", ")", "else", ":", "end_pos", "=", "query_start", "-", "2", "chars", "=", "contig", "[", "0", ":", "end_pos", "]", "query_seq", "=", "(", "\"-\"", "*", "(", "missing", "-", "len", "(", "chars", ")", ")", "+", "chars", "+", "str", "(", "query_seq", ")", ")", "# Adding to the homo sequence", "spaces", "=", "\" \"", "*", "missing", "homo_seq", "=", "str", "(", "spaces", ")", "+", "str", "(", "homo_seq", ")", "# If the alignment dosen't end and the last position data is", "# added to the end", "if", "sbjct_end", "<", "length", ":", "missing", "=", "length", "-", "sbjct_end", "if", "(", "missing", "<=", "(", "len", "(", "contig", ")", "-", "query_end", ")", "and", "hit", "[", "'strand'", "]", "!=", "1", "or", "hit", "[", "'strand'", "]", "==", "1", "and", "query_start", ">=", "missing", ")", ":", "# Getting the query sequence.", "# If the the hit is on the other strand the characters", "# are reversed.", "if", "hit", "[", "'strand'", "]", "==", "1", ":", "start_pos", "=", "query_start", "-", "missing", "-", "1", "end_pos", "=", "query_start", "-", "1", "chars", "=", "contig", "[", "start_pos", ":", "end_pos", "]", "chars", "=", "Blaster", ".", "reversecomplement", "(", "chars", ")", "else", ":", "start_pos", "=", "query_end", "end_pos", "=", "query_end", "+", "missing", "chars", "=", "contig", "[", "start_pos", ":", "end_pos", "]", "query_seq", "=", "query_seq", "+", "chars", "else", ":", "# If the hit is on the other strand the characters are reversed", "if", "hit", "[", "'strand'", "]", "==", "1", ":", "if", "query_start", "<", "3", ":", "query_seq", "=", "query_seq", "+", "\"-\"", "*", "missing", "else", ":", "end_pos", "=", "query_start", "-", "2", "chars", "=", "contig", "[", 
"0", ":", "end_pos", "]", "chars", "=", "Blaster", ".", "reversecomplement", "(", "chars", ")", "query_seq", "=", "(", "query_seq", "+", "chars", "+", "\"-\"", "*", "(", "missing", "-", "len", "(", "chars", ")", ")", ")", "elif", "query_end", "==", "len", "(", "contig", ")", ":", "query_seq", "=", "query_seq", "+", "\"-\"", "*", "missing", "else", ":", "start_pos", "=", "query_end", "chars", "=", "contig", "[", "start_pos", ":", "]", "query_seq", "=", "query_seq", "+", "chars", "+", "\"-\"", "*", "(", "missing", "-", "len", "(", "chars", ")", ")", "# Adding to the homo sequence", "spaces", "=", "\" \"", "*", "int", "(", "missing", ")", "homo_seq", "=", "str", "(", "homo_seq", ")", "+", "str", "(", "spaces", ")", "return", "query_seq", ",", "homo_seq" ]
Function for extracting extra seqeunce data to the query alignment if the full reference length are not covered
[ "Function", "for", "extracting", "extra", "seqeunce", "data", "to", "the", "query", "alignment", "if", "the", "full", "reference", "length", "are", "not", "covered" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/blaster/blaster.py#L465-L575
BertrandBordage/django-tablature
tablature/views.py
TableDataViewMixin.get_ordering_for_column
def get_ordering_for_column(self, column, direction): """ Returns a tuple of lookups to order by for the given column and direction. Direction is an integer, either -1, 0 or 1. """ if direction == 0: return () if column in self.orderings: ordering = self.orderings[column] else: field = self.get_field(column) if field is None: return () ordering = column if not isinstance(ordering, (tuple, list)): ordering = [ordering] if direction == 1: return ordering return [lookup[1:] if lookup[0] == '-' else '-' + lookup for lookup in ordering]
python
def get_ordering_for_column(self, column, direction): """ Returns a tuple of lookups to order by for the given column and direction. Direction is an integer, either -1, 0 or 1. """ if direction == 0: return () if column in self.orderings: ordering = self.orderings[column] else: field = self.get_field(column) if field is None: return () ordering = column if not isinstance(ordering, (tuple, list)): ordering = [ordering] if direction == 1: return ordering return [lookup[1:] if lookup[0] == '-' else '-' + lookup for lookup in ordering]
[ "def", "get_ordering_for_column", "(", "self", ",", "column", ",", "direction", ")", ":", "if", "direction", "==", "0", ":", "return", "(", ")", "if", "column", "in", "self", ".", "orderings", ":", "ordering", "=", "self", ".", "orderings", "[", "column", "]", "else", ":", "field", "=", "self", ".", "get_field", "(", "column", ")", "if", "field", "is", "None", ":", "return", "(", ")", "ordering", "=", "column", "if", "not", "isinstance", "(", "ordering", ",", "(", "tuple", ",", "list", ")", ")", ":", "ordering", "=", "[", "ordering", "]", "if", "direction", "==", "1", ":", "return", "ordering", "return", "[", "lookup", "[", "1", ":", "]", "if", "lookup", "[", "0", "]", "==", "'-'", "else", "'-'", "+", "lookup", "for", "lookup", "in", "ordering", "]" ]
Returns a tuple of lookups to order by for the given column and direction. Direction is an integer, either -1, 0 or 1.
[ "Returns", "a", "tuple", "of", "lookups", "to", "order", "by", "for", "the", "given", "column", "and", "direction", ".", "Direction", "is", "an", "integer", "either", "-", "1", "0", "or", "1", "." ]
train
https://github.com/BertrandBordage/django-tablature/blob/f5198b3fb1ebf7f5dfd8ebfd4bf977468e4f0390/tablature/views.py#L116-L135
pymacaron/pymacaron-core
pymacaron_core/swagger/spec.py
ApiSpec.model_to_json
def model_to_json(self, object, cleanup=True): """Take a model instance and return it as a json struct""" model_name = type(object).__name__ if model_name not in self.swagger_dict['definitions']: raise ValidationError("Swagger spec has no definition for model %s" % model_name) model_def = self.swagger_dict['definitions'][model_name] log.debug("Marshalling %s into json" % model_name) m = marshal_model(self.spec, model_def, object) if cleanup: self.cleanup_model(m) return m
python
def model_to_json(self, object, cleanup=True): """Take a model instance and return it as a json struct""" model_name = type(object).__name__ if model_name not in self.swagger_dict['definitions']: raise ValidationError("Swagger spec has no definition for model %s" % model_name) model_def = self.swagger_dict['definitions'][model_name] log.debug("Marshalling %s into json" % model_name) m = marshal_model(self.spec, model_def, object) if cleanup: self.cleanup_model(m) return m
[ "def", "model_to_json", "(", "self", ",", "object", ",", "cleanup", "=", "True", ")", ":", "model_name", "=", "type", "(", "object", ")", ".", "__name__", "if", "model_name", "not", "in", "self", ".", "swagger_dict", "[", "'definitions'", "]", ":", "raise", "ValidationError", "(", "\"Swagger spec has no definition for model %s\"", "%", "model_name", ")", "model_def", "=", "self", ".", "swagger_dict", "[", "'definitions'", "]", "[", "model_name", "]", "log", ".", "debug", "(", "\"Marshalling %s into json\"", "%", "model_name", ")", "m", "=", "marshal_model", "(", "self", ".", "spec", ",", "model_def", ",", "object", ")", "if", "cleanup", ":", "self", ".", "cleanup_model", "(", "m", ")", "return", "m" ]
Take a model instance and return it as a json struct
[ "Take", "a", "model", "instance", "and", "return", "it", "as", "a", "json", "struct" ]
train
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/spec.py#L95-L105
pymacaron/pymacaron-core
pymacaron_core/swagger/spec.py
ApiSpec.json_to_model
def json_to_model(self, model_name, j): """Take a json strust and a model name, and return a model instance""" if model_name not in self.swagger_dict['definitions']: raise ValidationError("Swagger spec has no definition for model %s" % model_name) model_def = self.swagger_dict['definitions'][model_name] log.debug("Unmarshalling json into %s" % model_name) return unmarshal_model(self.spec, model_def, j)
python
def json_to_model(self, model_name, j): """Take a json strust and a model name, and return a model instance""" if model_name not in self.swagger_dict['definitions']: raise ValidationError("Swagger spec has no definition for model %s" % model_name) model_def = self.swagger_dict['definitions'][model_name] log.debug("Unmarshalling json into %s" % model_name) return unmarshal_model(self.spec, model_def, j)
[ "def", "json_to_model", "(", "self", ",", "model_name", ",", "j", ")", ":", "if", "model_name", "not", "in", "self", ".", "swagger_dict", "[", "'definitions'", "]", ":", "raise", "ValidationError", "(", "\"Swagger spec has no definition for model %s\"", "%", "model_name", ")", "model_def", "=", "self", ".", "swagger_dict", "[", "'definitions'", "]", "[", "model_name", "]", "log", ".", "debug", "(", "\"Unmarshalling json into %s\"", "%", "model_name", ")", "return", "unmarshal_model", "(", "self", ".", "spec", ",", "model_def", ",", "j", ")" ]
Take a json strust and a model name, and return a model instance
[ "Take", "a", "json", "strust", "and", "a", "model", "name", "and", "return", "a", "model", "instance" ]
train
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/spec.py#L124-L130
pymacaron/pymacaron-core
pymacaron_core/swagger/spec.py
ApiSpec.validate
def validate(self, model_name, object): """Validate an object against its swagger model""" if model_name not in self.swagger_dict['definitions']: raise ValidationError("Swagger spec has no definition for model %s" % model_name) model_def = self.swagger_dict['definitions'][model_name] log.debug("Validating %s" % model_name) return validate_schema_object(self.spec, model_def, object)
python
def validate(self, model_name, object): """Validate an object against its swagger model""" if model_name not in self.swagger_dict['definitions']: raise ValidationError("Swagger spec has no definition for model %s" % model_name) model_def = self.swagger_dict['definitions'][model_name] log.debug("Validating %s" % model_name) return validate_schema_object(self.spec, model_def, object)
[ "def", "validate", "(", "self", ",", "model_name", ",", "object", ")", ":", "if", "model_name", "not", "in", "self", ".", "swagger_dict", "[", "'definitions'", "]", ":", "raise", "ValidationError", "(", "\"Swagger spec has no definition for model %s\"", "%", "model_name", ")", "model_def", "=", "self", ".", "swagger_dict", "[", "'definitions'", "]", "[", "model_name", "]", "log", ".", "debug", "(", "\"Validating %s\"", "%", "model_name", ")", "return", "validate_schema_object", "(", "self", ".", "spec", ",", "model_def", ",", "object", ")" ]
Validate an object against its swagger model
[ "Validate", "an", "object", "against", "its", "swagger", "model" ]
train
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/spec.py#L133-L139
pymacaron/pymacaron-core
pymacaron_core/swagger/spec.py
ApiSpec.call_on_each_endpoint
def call_on_each_endpoint(self, callback): """Find all server endpoints defined in the swagger spec and calls 'callback' for each, with an instance of EndpointData as argument. """ if 'paths' not in self.swagger_dict: return for path, d in list(self.swagger_dict['paths'].items()): for method, op_spec in list(d.items()): data = EndpointData(path, method) # Which server method handles this endpoint? if 'x-bind-server' not in op_spec: if 'x-no-bind-server' in op_spec: # That route should not be auto-generated log.info("Skipping generation of %s %s" % (method, path)) continue else: raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path)) data.handler_server = op_spec['x-bind-server'] # Make sure that endpoint only produces 'application/json' if 'produces' not in op_spec: raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path)) if len(op_spec['produces']) != 1: raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path)) if op_spec['produces'][0] == 'application/json': data.produces_json = True elif op_spec['produces'][0] == 'text/html': data.produces_html = True else: raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path)) # Which client method handles this endpoint? if 'x-bind-client' in op_spec: data.handler_client = op_spec['x-bind-client'] # Should we decorate the server handler? if 'x-decorate-server' in op_spec: data.decorate_server = op_spec['x-decorate-server'] # Should we manipulate the requests parameters? if 'x-decorate-request' in op_spec: data.decorate_request = op_spec['x-decorate-request'] # Generate a bravado-core operation object data.operation = Operation.from_spec(self.spec, path, method, op_spec) # Figure out how parameters are passed: one json in body? one or # more values in query? 
if 'parameters' in op_spec: params = op_spec['parameters'] for p in params: if p['in'] == 'body': data.param_in_body = True if p['in'] == 'query': data.param_in_query = True if p['in'] == 'path': data.param_in_path = True if data.param_in_path: # Substitute {...} with <...> in path, to make a Flask friendly path data.path = data.path.replace('{', '<').replace('}', '>') if data.param_in_body and data.param_in_query: raise Exception("Cannot support params in both body and param (%s %s)" % (method, path)) else: data.no_params = True callback(data)
python
def call_on_each_endpoint(self, callback): """Find all server endpoints defined in the swagger spec and calls 'callback' for each, with an instance of EndpointData as argument. """ if 'paths' not in self.swagger_dict: return for path, d in list(self.swagger_dict['paths'].items()): for method, op_spec in list(d.items()): data = EndpointData(path, method) # Which server method handles this endpoint? if 'x-bind-server' not in op_spec: if 'x-no-bind-server' in op_spec: # That route should not be auto-generated log.info("Skipping generation of %s %s" % (method, path)) continue else: raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path)) data.handler_server = op_spec['x-bind-server'] # Make sure that endpoint only produces 'application/json' if 'produces' not in op_spec: raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path)) if len(op_spec['produces']) != 1: raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path)) if op_spec['produces'][0] == 'application/json': data.produces_json = True elif op_spec['produces'][0] == 'text/html': data.produces_html = True else: raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path)) # Which client method handles this endpoint? if 'x-bind-client' in op_spec: data.handler_client = op_spec['x-bind-client'] # Should we decorate the server handler? if 'x-decorate-server' in op_spec: data.decorate_server = op_spec['x-decorate-server'] # Should we manipulate the requests parameters? if 'x-decorate-request' in op_spec: data.decorate_request = op_spec['x-decorate-request'] # Generate a bravado-core operation object data.operation = Operation.from_spec(self.spec, path, method, op_spec) # Figure out how parameters are passed: one json in body? one or # more values in query? 
if 'parameters' in op_spec: params = op_spec['parameters'] for p in params: if p['in'] == 'body': data.param_in_body = True if p['in'] == 'query': data.param_in_query = True if p['in'] == 'path': data.param_in_path = True if data.param_in_path: # Substitute {...} with <...> in path, to make a Flask friendly path data.path = data.path.replace('{', '<').replace('}', '>') if data.param_in_body and data.param_in_query: raise Exception("Cannot support params in both body and param (%s %s)" % (method, path)) else: data.no_params = True callback(data)
[ "def", "call_on_each_endpoint", "(", "self", ",", "callback", ")", ":", "if", "'paths'", "not", "in", "self", ".", "swagger_dict", ":", "return", "for", "path", ",", "d", "in", "list", "(", "self", ".", "swagger_dict", "[", "'paths'", "]", ".", "items", "(", ")", ")", ":", "for", "method", ",", "op_spec", "in", "list", "(", "d", ".", "items", "(", ")", ")", ":", "data", "=", "EndpointData", "(", "path", ",", "method", ")", "# Which server method handles this endpoint?", "if", "'x-bind-server'", "not", "in", "op_spec", ":", "if", "'x-no-bind-server'", "in", "op_spec", ":", "# That route should not be auto-generated", "log", ".", "info", "(", "\"Skipping generation of %s %s\"", "%", "(", "method", ",", "path", ")", ")", "continue", "else", ":", "raise", "Exception", "(", "\"Swagger api defines no x-bind-server for %s %s\"", "%", "(", "method", ",", "path", ")", ")", "data", ".", "handler_server", "=", "op_spec", "[", "'x-bind-server'", "]", "# Make sure that endpoint only produces 'application/json'", "if", "'produces'", "not", "in", "op_spec", ":", "raise", "Exception", "(", "\"Swagger api has no 'produces' section for %s %s\"", "%", "(", "method", ",", "path", ")", ")", "if", "len", "(", "op_spec", "[", "'produces'", "]", ")", "!=", "1", ":", "raise", "Exception", "(", "\"Expecting only one type under 'produces' for %s %s\"", "%", "(", "method", ",", "path", ")", ")", "if", "op_spec", "[", "'produces'", "]", "[", "0", "]", "==", "'application/json'", ":", "data", ".", "produces_json", "=", "True", "elif", "op_spec", "[", "'produces'", "]", "[", "0", "]", "==", "'text/html'", ":", "data", ".", "produces_html", "=", "True", "else", ":", "raise", "Exception", "(", "\"Only 'application/json' or 'text/html' are supported. 
See %s %s\"", "%", "(", "method", ",", "path", ")", ")", "# Which client method handles this endpoint?", "if", "'x-bind-client'", "in", "op_spec", ":", "data", ".", "handler_client", "=", "op_spec", "[", "'x-bind-client'", "]", "# Should we decorate the server handler?", "if", "'x-decorate-server'", "in", "op_spec", ":", "data", ".", "decorate_server", "=", "op_spec", "[", "'x-decorate-server'", "]", "# Should we manipulate the requests parameters?", "if", "'x-decorate-request'", "in", "op_spec", ":", "data", ".", "decorate_request", "=", "op_spec", "[", "'x-decorate-request'", "]", "# Generate a bravado-core operation object", "data", ".", "operation", "=", "Operation", ".", "from_spec", "(", "self", ".", "spec", ",", "path", ",", "method", ",", "op_spec", ")", "# Figure out how parameters are passed: one json in body? one or", "# more values in query?", "if", "'parameters'", "in", "op_spec", ":", "params", "=", "op_spec", "[", "'parameters'", "]", "for", "p", "in", "params", ":", "if", "p", "[", "'in'", "]", "==", "'body'", ":", "data", ".", "param_in_body", "=", "True", "if", "p", "[", "'in'", "]", "==", "'query'", ":", "data", ".", "param_in_query", "=", "True", "if", "p", "[", "'in'", "]", "==", "'path'", ":", "data", ".", "param_in_path", "=", "True", "if", "data", ".", "param_in_path", ":", "# Substitute {...} with <...> in path, to make a Flask friendly path", "data", ".", "path", "=", "data", ".", "path", ".", "replace", "(", "'{'", ",", "'<'", ")", ".", "replace", "(", "'}'", ",", "'>'", ")", "if", "data", ".", "param_in_body", "and", "data", ".", "param_in_query", ":", "raise", "Exception", "(", "\"Cannot support params in both body and param (%s %s)\"", "%", "(", "method", ",", "path", ")", ")", "else", ":", "data", ".", "no_params", "=", "True", "callback", "(", "data", ")" ]
Find all server endpoints defined in the swagger spec and calls 'callback' for each, with an instance of EndpointData as argument.
[ "Find", "all", "server", "endpoints", "defined", "in", "the", "swagger", "spec", "and", "calls", "callback", "for", "each", "with", "an", "instance", "of", "EndpointData", "as", "argument", "." ]
train
https://github.com/pymacaron/pymacaron-core/blob/95070a39ed7065a84244ff5601fea4d54cc72b66/pymacaron_core/swagger/spec.py#L142-L213
andrewguy9/safeoutput
safeoutput/__init__.py
main
def main(args=None): """Buffer stdin and flush, and avoid incomplete files.""" parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( '--binary', dest='mode', action='store_const', const="wb", default="w", help='write in binary mode') parser.add_argument( 'output', metavar='FILE', type=unicode, help='Output file') logging.basicConfig( level=logging.DEBUG, stream=sys.stderr, format='[%(levelname)s elapsed=%(relativeCreated)dms] %(message)s') args = parser.parse_args(args or sys.argv[1:]) with open(args.output, args.mode) as fd: for line in sys.stdin: fd.write(line)
python
def main(args=None): """Buffer stdin and flush, and avoid incomplete files.""" parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( '--binary', dest='mode', action='store_const', const="wb", default="w", help='write in binary mode') parser.add_argument( 'output', metavar='FILE', type=unicode, help='Output file') logging.basicConfig( level=logging.DEBUG, stream=sys.stderr, format='[%(levelname)s elapsed=%(relativeCreated)dms] %(message)s') args = parser.parse_args(args or sys.argv[1:]) with open(args.output, args.mode) as fd: for line in sys.stdin: fd.write(line)
[ "def", "main", "(", "args", "=", "None", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "main", ".", "__doc__", ")", "parser", ".", "add_argument", "(", "'--binary'", ",", "dest", "=", "'mode'", ",", "action", "=", "'store_const'", ",", "const", "=", "\"wb\"", ",", "default", "=", "\"w\"", ",", "help", "=", "'write in binary mode'", ")", "parser", ".", "add_argument", "(", "'output'", ",", "metavar", "=", "'FILE'", ",", "type", "=", "unicode", ",", "help", "=", "'Output file'", ")", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ",", "stream", "=", "sys", ".", "stderr", ",", "format", "=", "'[%(levelname)s elapsed=%(relativeCreated)dms] %(message)s'", ")", "args", "=", "parser", ".", "parse_args", "(", "args", "or", "sys", ".", "argv", "[", "1", ":", "]", ")", "with", "open", "(", "args", ".", "output", ",", "args", ".", "mode", ")", "as", "fd", ":", "for", "line", "in", "sys", ".", "stdin", ":", "fd", ".", "write", "(", "line", ")" ]
Buffer stdin and flush, and avoid incomplete files.
[ "Buffer", "stdin", "and", "flush", "and", "avoid", "incomplete", "files", "." ]
train
https://github.com/andrewguy9/safeoutput/blob/e48fa1f691b57ea81c8faf7539f0bdd28ab59be5/safeoutput/__init__.py#L64-L86
hid-io/layouts-python
layouts/emitter.py
basic_c_defines
def basic_c_defines( layout, keyboard_prefix="KEY_", led_prefix="LED_", sysctrl_prefix="SYS_", cons_prefix="CONS_", code_suffix=True, all_caps=True, space_char="_" ): ''' Generates a list of C defines that can be used to generate a header file @param layout: Layout object @keyboard_prefix: Prefix used for to_hid_keyboard @led_prefix: Prefix used for to_hid_led @sysctrl_prefix: Prefix used for to_hid_sysctrl @cons_prefix: Prefix used for to_hid_consumer @code_suffix: Append _<usb code> to each name @all_caps: Set to true if labels should be converted to all caps @space_char: Character to replace space with @returns: List of C tuples (<name>, <number>) that can be used to generate C-style defines. Each section has it's own list. ''' # Keyboard Codes keyboard_defines = [] for code, name in layout.json()['to_hid_keyboard'].items(): new_name = "{}{}".format(keyboard_prefix, name.replace(' ', space_char)) if all_caps: new_name = new_name.upper() if code_suffix: new_name = "{}_{}".format(new_name, int(code, 0)) define = (new_name, code) keyboard_defines.append(define) # LED Codes led_defines = [] for code, name in layout.json()['to_hid_led'].items(): new_name = "{}{}".format(led_prefix, name.replace(' ', space_char)) if all_caps: new_name = new_name.upper() if code_suffix: new_name = "{}_{}".format(new_name, int(code, 0)) define = (new_name, code) led_defines.append(define) # System Control Codes sysctrl_defines = [] for code, name in layout.json()['to_hid_sysctrl'].items(): new_name = "{}{}".format(sysctrl_prefix, name.replace(' ', space_char)) if all_caps: new_name = new_name.upper() if code_suffix: new_name = "{}_{}".format(new_name, int(code, 0)) define = (new_name, code) sysctrl_defines.append(define) # Consumer Codes cons_defines = [] for code, name in layout.json()['to_hid_consumer'].items(): new_name = "{}{}".format(cons_prefix, name.replace(' ', space_char)) if all_caps: new_name = new_name.upper() if code_suffix: new_name = "{}_{}".format(new_name, int(code, 0)) 
define = (new_name, code) cons_defines.append(define) # Return list of list of tuples defines = [keyboard_defines, led_defines, sysctrl_defines, cons_defines] return defines
python
def basic_c_defines( layout, keyboard_prefix="KEY_", led_prefix="LED_", sysctrl_prefix="SYS_", cons_prefix="CONS_", code_suffix=True, all_caps=True, space_char="_" ): ''' Generates a list of C defines that can be used to generate a header file @param layout: Layout object @keyboard_prefix: Prefix used for to_hid_keyboard @led_prefix: Prefix used for to_hid_led @sysctrl_prefix: Prefix used for to_hid_sysctrl @cons_prefix: Prefix used for to_hid_consumer @code_suffix: Append _<usb code> to each name @all_caps: Set to true if labels should be converted to all caps @space_char: Character to replace space with @returns: List of C tuples (<name>, <number>) that can be used to generate C-style defines. Each section has it's own list. ''' # Keyboard Codes keyboard_defines = [] for code, name in layout.json()['to_hid_keyboard'].items(): new_name = "{}{}".format(keyboard_prefix, name.replace(' ', space_char)) if all_caps: new_name = new_name.upper() if code_suffix: new_name = "{}_{}".format(new_name, int(code, 0)) define = (new_name, code) keyboard_defines.append(define) # LED Codes led_defines = [] for code, name in layout.json()['to_hid_led'].items(): new_name = "{}{}".format(led_prefix, name.replace(' ', space_char)) if all_caps: new_name = new_name.upper() if code_suffix: new_name = "{}_{}".format(new_name, int(code, 0)) define = (new_name, code) led_defines.append(define) # System Control Codes sysctrl_defines = [] for code, name in layout.json()['to_hid_sysctrl'].items(): new_name = "{}{}".format(sysctrl_prefix, name.replace(' ', space_char)) if all_caps: new_name = new_name.upper() if code_suffix: new_name = "{}_{}".format(new_name, int(code, 0)) define = (new_name, code) sysctrl_defines.append(define) # Consumer Codes cons_defines = [] for code, name in layout.json()['to_hid_consumer'].items(): new_name = "{}{}".format(cons_prefix, name.replace(' ', space_char)) if all_caps: new_name = new_name.upper() if code_suffix: new_name = "{}_{}".format(new_name, int(code, 0)) 
define = (new_name, code) cons_defines.append(define) # Return list of list of tuples defines = [keyboard_defines, led_defines, sysctrl_defines, cons_defines] return defines
[ "def", "basic_c_defines", "(", "layout", ",", "keyboard_prefix", "=", "\"KEY_\"", ",", "led_prefix", "=", "\"LED_\"", ",", "sysctrl_prefix", "=", "\"SYS_\"", ",", "cons_prefix", "=", "\"CONS_\"", ",", "code_suffix", "=", "True", ",", "all_caps", "=", "True", ",", "space_char", "=", "\"_\"", ")", ":", "# Keyboard Codes", "keyboard_defines", "=", "[", "]", "for", "code", ",", "name", "in", "layout", ".", "json", "(", ")", "[", "'to_hid_keyboard'", "]", ".", "items", "(", ")", ":", "new_name", "=", "\"{}{}\"", ".", "format", "(", "keyboard_prefix", ",", "name", ".", "replace", "(", "' '", ",", "space_char", ")", ")", "if", "all_caps", ":", "new_name", "=", "new_name", ".", "upper", "(", ")", "if", "code_suffix", ":", "new_name", "=", "\"{}_{}\"", ".", "format", "(", "new_name", ",", "int", "(", "code", ",", "0", ")", ")", "define", "=", "(", "new_name", ",", "code", ")", "keyboard_defines", ".", "append", "(", "define", ")", "# LED Codes", "led_defines", "=", "[", "]", "for", "code", ",", "name", "in", "layout", ".", "json", "(", ")", "[", "'to_hid_led'", "]", ".", "items", "(", ")", ":", "new_name", "=", "\"{}{}\"", ".", "format", "(", "led_prefix", ",", "name", ".", "replace", "(", "' '", ",", "space_char", ")", ")", "if", "all_caps", ":", "new_name", "=", "new_name", ".", "upper", "(", ")", "if", "code_suffix", ":", "new_name", "=", "\"{}_{}\"", ".", "format", "(", "new_name", ",", "int", "(", "code", ",", "0", ")", ")", "define", "=", "(", "new_name", ",", "code", ")", "led_defines", ".", "append", "(", "define", ")", "# System Control Codes", "sysctrl_defines", "=", "[", "]", "for", "code", ",", "name", "in", "layout", ".", "json", "(", ")", "[", "'to_hid_sysctrl'", "]", ".", "items", "(", ")", ":", "new_name", "=", "\"{}{}\"", ".", "format", "(", "sysctrl_prefix", ",", "name", ".", "replace", "(", "' '", ",", "space_char", ")", ")", "if", "all_caps", ":", "new_name", "=", "new_name", ".", "upper", "(", ")", "if", "code_suffix", ":", "new_name", "=", 
"\"{}_{}\"", ".", "format", "(", "new_name", ",", "int", "(", "code", ",", "0", ")", ")", "define", "=", "(", "new_name", ",", "code", ")", "sysctrl_defines", ".", "append", "(", "define", ")", "# Consumer Codes", "cons_defines", "=", "[", "]", "for", "code", ",", "name", "in", "layout", ".", "json", "(", ")", "[", "'to_hid_consumer'", "]", ".", "items", "(", ")", ":", "new_name", "=", "\"{}{}\"", ".", "format", "(", "cons_prefix", ",", "name", ".", "replace", "(", "' '", ",", "space_char", ")", ")", "if", "all_caps", ":", "new_name", "=", "new_name", ".", "upper", "(", ")", "if", "code_suffix", ":", "new_name", "=", "\"{}_{}\"", ".", "format", "(", "new_name", ",", "int", "(", "code", ",", "0", ")", ")", "define", "=", "(", "new_name", ",", "code", ")", "cons_defines", ".", "append", "(", "define", ")", "# Return list of list of tuples", "defines", "=", "[", "keyboard_defines", ",", "led_defines", ",", "sysctrl_defines", ",", "cons_defines", "]", "return", "defines" ]
Generates a list of C defines that can be used to generate a header file @param layout: Layout object @keyboard_prefix: Prefix used for to_hid_keyboard @led_prefix: Prefix used for to_hid_led @sysctrl_prefix: Prefix used for to_hid_sysctrl @cons_prefix: Prefix used for to_hid_consumer @code_suffix: Append _<usb code> to each name @all_caps: Set to true if labels should be converted to all caps @space_char: Character to replace space with @returns: List of C tuples (<name>, <number>) that can be used to generate C-style defines. Each section has it's own list.
[ "Generates", "a", "list", "of", "C", "defines", "that", "can", "be", "used", "to", "generate", "a", "header", "file" ]
train
https://github.com/hid-io/layouts-python/blob/b347578bfb4198fd812ecd7a2d9c7e551a856280/layouts/emitter.py#L11-L81
AASHE/django-constant-contact
django_constant_contact/models.py
ConstantContact.new_email_marketing_campaign
def new_email_marketing_campaign(self, name, email_content, from_email, from_name, reply_to_email, subject, text_content, address, is_view_as_webpage_enabled=False, view_as_web_page_link_text='', view_as_web_page_text='', is_permission_reminder_enabled=False, permission_reminder_text=''): """Create a Constant Contact email marketing campaign. Returns an EmailMarketingCampaign object. """ url = self.api.join(self.EMAIL_MARKETING_CAMPAIGN_URL) inlined_email_content = self.inline_css(email_content) minified_email_content = html_minify(inlined_email_content) worked_around_email_content = work_around(minified_email_content) data = { 'name': name, 'subject': subject, 'from_name': from_name, 'from_email': from_email, 'reply_to_email': reply_to_email, 'email_content': worked_around_email_content, 'email_content_format': 'HTML', 'text_content': text_content, 'message_footer': { 'organization_name': address['organization_name'], 'address_line_1': address['address_line_1'], 'address_line_2': address['address_line_2'], 'address_line_3': address['address_line_3'], 'city': address['city'], 'state': address['state'], 'international_state': address['international_state'], 'postal_code': address['postal_code'], 'country': address['country'] }, 'is_view_as_webpage_enabled': is_view_as_webpage_enabled, 'view_as_web_page_link_text': view_as_web_page_link_text, 'view_as_web_page_text': view_as_web_page_text, 'is_permission_reminder_enabled': is_permission_reminder_enabled, 'permission_reminder_text': permission_reminder_text } response = url.post(data=json.dumps(data), headers={'content-type': 'application/json'}) self.handle_response_status(response) return EmailMarketingCampaign.objects.create(data=response.json())
python
def new_email_marketing_campaign(self, name, email_content, from_email, from_name, reply_to_email, subject, text_content, address, is_view_as_webpage_enabled=False, view_as_web_page_link_text='', view_as_web_page_text='', is_permission_reminder_enabled=False, permission_reminder_text=''): """Create a Constant Contact email marketing campaign. Returns an EmailMarketingCampaign object. """ url = self.api.join(self.EMAIL_MARKETING_CAMPAIGN_URL) inlined_email_content = self.inline_css(email_content) minified_email_content = html_minify(inlined_email_content) worked_around_email_content = work_around(minified_email_content) data = { 'name': name, 'subject': subject, 'from_name': from_name, 'from_email': from_email, 'reply_to_email': reply_to_email, 'email_content': worked_around_email_content, 'email_content_format': 'HTML', 'text_content': text_content, 'message_footer': { 'organization_name': address['organization_name'], 'address_line_1': address['address_line_1'], 'address_line_2': address['address_line_2'], 'address_line_3': address['address_line_3'], 'city': address['city'], 'state': address['state'], 'international_state': address['international_state'], 'postal_code': address['postal_code'], 'country': address['country'] }, 'is_view_as_webpage_enabled': is_view_as_webpage_enabled, 'view_as_web_page_link_text': view_as_web_page_link_text, 'view_as_web_page_text': view_as_web_page_text, 'is_permission_reminder_enabled': is_permission_reminder_enabled, 'permission_reminder_text': permission_reminder_text } response = url.post(data=json.dumps(data), headers={'content-type': 'application/json'}) self.handle_response_status(response) return EmailMarketingCampaign.objects.create(data=response.json())
[ "def", "new_email_marketing_campaign", "(", "self", ",", "name", ",", "email_content", ",", "from_email", ",", "from_name", ",", "reply_to_email", ",", "subject", ",", "text_content", ",", "address", ",", "is_view_as_webpage_enabled", "=", "False", ",", "view_as_web_page_link_text", "=", "''", ",", "view_as_web_page_text", "=", "''", ",", "is_permission_reminder_enabled", "=", "False", ",", "permission_reminder_text", "=", "''", ")", ":", "url", "=", "self", ".", "api", ".", "join", "(", "self", ".", "EMAIL_MARKETING_CAMPAIGN_URL", ")", "inlined_email_content", "=", "self", ".", "inline_css", "(", "email_content", ")", "minified_email_content", "=", "html_minify", "(", "inlined_email_content", ")", "worked_around_email_content", "=", "work_around", "(", "minified_email_content", ")", "data", "=", "{", "'name'", ":", "name", ",", "'subject'", ":", "subject", ",", "'from_name'", ":", "from_name", ",", "'from_email'", ":", "from_email", ",", "'reply_to_email'", ":", "reply_to_email", ",", "'email_content'", ":", "worked_around_email_content", ",", "'email_content_format'", ":", "'HTML'", ",", "'text_content'", ":", "text_content", ",", "'message_footer'", ":", "{", "'organization_name'", ":", "address", "[", "'organization_name'", "]", ",", "'address_line_1'", ":", "address", "[", "'address_line_1'", "]", ",", "'address_line_2'", ":", "address", "[", "'address_line_2'", "]", ",", "'address_line_3'", ":", "address", "[", "'address_line_3'", "]", ",", "'city'", ":", "address", "[", "'city'", "]", ",", "'state'", ":", "address", "[", "'state'", "]", ",", "'international_state'", ":", "address", "[", "'international_state'", "]", ",", "'postal_code'", ":", "address", "[", "'postal_code'", "]", ",", "'country'", ":", "address", "[", "'country'", "]", "}", ",", "'is_view_as_webpage_enabled'", ":", "is_view_as_webpage_enabled", ",", "'view_as_web_page_link_text'", ":", "view_as_web_page_link_text", ",", "'view_as_web_page_text'", ":", "view_as_web_page_text", ",", 
"'is_permission_reminder_enabled'", ":", "is_permission_reminder_enabled", ",", "'permission_reminder_text'", ":", "permission_reminder_text", "}", "response", "=", "url", ".", "post", "(", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", ")", "self", ".", "handle_response_status", "(", "response", ")", "return", "EmailMarketingCampaign", ".", "objects", ".", "create", "(", "data", "=", "response", ".", "json", "(", ")", ")" ]
Create a Constant Contact email marketing campaign. Returns an EmailMarketingCampaign object.
[ "Create", "a", "Constant", "Contact", "email", "marketing", "campaign", ".", "Returns", "an", "EmailMarketingCampaign", "object", "." ]
train
https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L56-L105
AASHE/django-constant-contact
django_constant_contact/models.py
ConstantContact.update_email_marketing_campaign
def update_email_marketing_campaign(self, email_marketing_campaign, name, email_content, from_email, from_name, reply_to_email, subject, text_content, address, is_view_as_webpage_enabled=False, view_as_web_page_link_text='', view_as_web_page_text='', is_permission_reminder_enabled=False, permission_reminder_text=''): """Update a Constant Contact email marketing campaign. Returns the updated EmailMarketingCampaign object. """ url = self.api.join( '/'.join([self.EMAIL_MARKETING_CAMPAIGN_URL, str(email_marketing_campaign.constant_contact_id)])) inlined_email_content = self.inline_css(email_content) minified_email_content = html_minify(inlined_email_content) worked_around_email_content = work_around(minified_email_content) data = { 'name': name, 'subject': subject, 'from_name': from_name, 'from_email': from_email, 'reply_to_email': reply_to_email, 'email_content': worked_around_email_content, 'email_content_format': 'HTML', 'text_content': text_content, 'message_footer': { 'organization_name': address['organization_name'], 'address_line_1': address['address_line_1'], 'address_line_2': address['address_line_2'], 'address_line_3': address['address_line_3'], 'city': address['city'], 'state': address['state'], 'international_state': address['international_state'], 'postal_code': address['postal_code'], 'country': address['country'] }, 'is_view_as_webpage_enabled': is_view_as_webpage_enabled, 'view_as_web_page_link_text': view_as_web_page_link_text, 'view_as_web_page_text': view_as_web_page_text, 'is_permission_reminder_enabled': is_permission_reminder_enabled, 'permission_reminder_text': permission_reminder_text } response = url.put(data=json.dumps(data), headers={'content-type': 'application/json'}) self.handle_response_status(response) email_marketing_campaign.data = response.json() email_marketing_campaign.save() return email_marketing_campaign
python
def update_email_marketing_campaign(self, email_marketing_campaign, name, email_content, from_email, from_name, reply_to_email, subject, text_content, address, is_view_as_webpage_enabled=False, view_as_web_page_link_text='', view_as_web_page_text='', is_permission_reminder_enabled=False, permission_reminder_text=''): """Update a Constant Contact email marketing campaign. Returns the updated EmailMarketingCampaign object. """ url = self.api.join( '/'.join([self.EMAIL_MARKETING_CAMPAIGN_URL, str(email_marketing_campaign.constant_contact_id)])) inlined_email_content = self.inline_css(email_content) minified_email_content = html_minify(inlined_email_content) worked_around_email_content = work_around(minified_email_content) data = { 'name': name, 'subject': subject, 'from_name': from_name, 'from_email': from_email, 'reply_to_email': reply_to_email, 'email_content': worked_around_email_content, 'email_content_format': 'HTML', 'text_content': text_content, 'message_footer': { 'organization_name': address['organization_name'], 'address_line_1': address['address_line_1'], 'address_line_2': address['address_line_2'], 'address_line_3': address['address_line_3'], 'city': address['city'], 'state': address['state'], 'international_state': address['international_state'], 'postal_code': address['postal_code'], 'country': address['country'] }, 'is_view_as_webpage_enabled': is_view_as_webpage_enabled, 'view_as_web_page_link_text': view_as_web_page_link_text, 'view_as_web_page_text': view_as_web_page_text, 'is_permission_reminder_enabled': is_permission_reminder_enabled, 'permission_reminder_text': permission_reminder_text } response = url.put(data=json.dumps(data), headers={'content-type': 'application/json'}) self.handle_response_status(response) email_marketing_campaign.data = response.json() email_marketing_campaign.save() return email_marketing_campaign
[ "def", "update_email_marketing_campaign", "(", "self", ",", "email_marketing_campaign", ",", "name", ",", "email_content", ",", "from_email", ",", "from_name", ",", "reply_to_email", ",", "subject", ",", "text_content", ",", "address", ",", "is_view_as_webpage_enabled", "=", "False", ",", "view_as_web_page_link_text", "=", "''", ",", "view_as_web_page_text", "=", "''", ",", "is_permission_reminder_enabled", "=", "False", ",", "permission_reminder_text", "=", "''", ")", ":", "url", "=", "self", ".", "api", ".", "join", "(", "'/'", ".", "join", "(", "[", "self", ".", "EMAIL_MARKETING_CAMPAIGN_URL", ",", "str", "(", "email_marketing_campaign", ".", "constant_contact_id", ")", "]", ")", ")", "inlined_email_content", "=", "self", ".", "inline_css", "(", "email_content", ")", "minified_email_content", "=", "html_minify", "(", "inlined_email_content", ")", "worked_around_email_content", "=", "work_around", "(", "minified_email_content", ")", "data", "=", "{", "'name'", ":", "name", ",", "'subject'", ":", "subject", ",", "'from_name'", ":", "from_name", ",", "'from_email'", ":", "from_email", ",", "'reply_to_email'", ":", "reply_to_email", ",", "'email_content'", ":", "worked_around_email_content", ",", "'email_content_format'", ":", "'HTML'", ",", "'text_content'", ":", "text_content", ",", "'message_footer'", ":", "{", "'organization_name'", ":", "address", "[", "'organization_name'", "]", ",", "'address_line_1'", ":", "address", "[", "'address_line_1'", "]", ",", "'address_line_2'", ":", "address", "[", "'address_line_2'", "]", ",", "'address_line_3'", ":", "address", "[", "'address_line_3'", "]", ",", "'city'", ":", "address", "[", "'city'", "]", ",", "'state'", ":", "address", "[", "'state'", "]", ",", "'international_state'", ":", "address", "[", "'international_state'", "]", ",", "'postal_code'", ":", "address", "[", "'postal_code'", "]", ",", "'country'", ":", "address", "[", "'country'", "]", "}", ",", "'is_view_as_webpage_enabled'", ":", 
"is_view_as_webpage_enabled", ",", "'view_as_web_page_link_text'", ":", "view_as_web_page_link_text", ",", "'view_as_web_page_text'", ":", "view_as_web_page_text", ",", "'is_permission_reminder_enabled'", ":", "is_permission_reminder_enabled", ",", "'permission_reminder_text'", ":", "permission_reminder_text", "}", "response", "=", "url", ".", "put", "(", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", ")", "self", ".", "handle_response_status", "(", "response", ")", "email_marketing_campaign", ".", "data", "=", "response", ".", "json", "(", ")", "email_marketing_campaign", ".", "save", "(", ")", "return", "email_marketing_campaign" ]
Update a Constant Contact email marketing campaign. Returns the updated EmailMarketingCampaign object.
[ "Update", "a", "Constant", "Contact", "email", "marketing", "campaign", ".", "Returns", "the", "updated", "EmailMarketingCampaign", "object", "." ]
train
https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L107-L162
AASHE/django-constant-contact
django_constant_contact/models.py
ConstantContact.delete_email_marketing_campaign
def delete_email_marketing_campaign(self, email_marketing_campaign): """Deletes a Constant Contact email marketing campaign. """ url = self.api.join('/'.join([ self.EMAIL_MARKETING_CAMPAIGN_URL, str(email_marketing_campaign.constant_contact_id)])) response = url.delete() self.handle_response_status(response) return response
python
def delete_email_marketing_campaign(self, email_marketing_campaign): """Deletes a Constant Contact email marketing campaign. """ url = self.api.join('/'.join([ self.EMAIL_MARKETING_CAMPAIGN_URL, str(email_marketing_campaign.constant_contact_id)])) response = url.delete() self.handle_response_status(response) return response
[ "def", "delete_email_marketing_campaign", "(", "self", ",", "email_marketing_campaign", ")", ":", "url", "=", "self", ".", "api", ".", "join", "(", "'/'", ".", "join", "(", "[", "self", ".", "EMAIL_MARKETING_CAMPAIGN_URL", ",", "str", "(", "email_marketing_campaign", ".", "constant_contact_id", ")", "]", ")", ")", "response", "=", "url", ".", "delete", "(", ")", "self", ".", "handle_response_status", "(", "response", ")", "return", "response" ]
Deletes a Constant Contact email marketing campaign.
[ "Deletes", "a", "Constant", "Contact", "email", "marketing", "campaign", "." ]
train
https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L164-L172
AASHE/django-constant-contact
django_constant_contact/models.py
ConstantContact.inline_css
def inline_css(self, html): """Inlines CSS defined in external style sheets. """ premailer = Premailer(html) inlined_html = premailer.transform(pretty_print=True) return inlined_html
python
def inline_css(self, html): """Inlines CSS defined in external style sheets. """ premailer = Premailer(html) inlined_html = premailer.transform(pretty_print=True) return inlined_html
[ "def", "inline_css", "(", "self", ",", "html", ")", ":", "premailer", "=", "Premailer", "(", "html", ")", "inlined_html", "=", "premailer", ".", "transform", "(", "pretty_print", "=", "True", ")", "return", "inlined_html" ]
Inlines CSS defined in external style sheets.
[ "Inlines", "CSS", "defined", "in", "external", "style", "sheets", "." ]
train
https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L174-L179
AASHE/django-constant-contact
django_constant_contact/models.py
ConstantContact.preview_email_marketing_campaign
def preview_email_marketing_campaign(self, email_marketing_campaign): """Returns HTML and text previews of an EmailMarketingCampaign. """ url = self.api.join('/'.join([ self.EMAIL_MARKETING_CAMPAIGN_URL, str(email_marketing_campaign.constant_contact_id), 'preview'])) response = url.get() self.handle_response_status(response) return (response.json()['preview_email_content'], response.json()['preview_text_content'])
python
def preview_email_marketing_campaign(self, email_marketing_campaign): """Returns HTML and text previews of an EmailMarketingCampaign. """ url = self.api.join('/'.join([ self.EMAIL_MARKETING_CAMPAIGN_URL, str(email_marketing_campaign.constant_contact_id), 'preview'])) response = url.get() self.handle_response_status(response) return (response.json()['preview_email_content'], response.json()['preview_text_content'])
[ "def", "preview_email_marketing_campaign", "(", "self", ",", "email_marketing_campaign", ")", ":", "url", "=", "self", ".", "api", ".", "join", "(", "'/'", ".", "join", "(", "[", "self", ".", "EMAIL_MARKETING_CAMPAIGN_URL", ",", "str", "(", "email_marketing_campaign", ".", "constant_contact_id", ")", ",", "'preview'", "]", ")", ")", "response", "=", "url", ".", "get", "(", ")", "self", ".", "handle_response_status", "(", "response", ")", "return", "(", "response", ".", "json", "(", ")", "[", "'preview_email_content'", "]", ",", "response", ".", "json", "(", ")", "[", "'preview_text_content'", "]", ")" ]
Returns HTML and text previews of an EmailMarketingCampaign.
[ "Returns", "HTML", "and", "text", "previews", "of", "an", "EmailMarketingCampaign", "." ]
train
https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L181-L191
AASHE/django-constant-contact
django_constant_contact/models.py
EmailMarketingCampaign.pre_save
def pre_save(cls, sender, instance, *args, **kwargs): """Pull constant_contact_id out of data. """ instance.constant_contact_id = str(instance.data['id'])
python
def pre_save(cls, sender, instance, *args, **kwargs): """Pull constant_contact_id out of data. """ instance.constant_contact_id = str(instance.data['id'])
[ "def", "pre_save", "(", "cls", ",", "sender", ",", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "instance", ".", "constant_contact_id", "=", "str", "(", "instance", ".", "data", "[", "'id'", "]", ")" ]
Pull constant_contact_id out of data.
[ "Pull", "constant_contact_id", "out", "of", "data", "." ]
train
https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L216-L219
AASHE/django-constant-contact
django_constant_contact/models.py
EmailMarketingCampaign.pre_delete
def pre_delete(cls, sender, instance, *args, **kwargs): """Deletes the CC email marketing campaign associated with me. """ cc = ConstantContact() response = cc.delete_email_marketing_campaign(instance) response.raise_for_status()
python
def pre_delete(cls, sender, instance, *args, **kwargs): """Deletes the CC email marketing campaign associated with me. """ cc = ConstantContact() response = cc.delete_email_marketing_campaign(instance) response.raise_for_status()
[ "def", "pre_delete", "(", "cls", ",", "sender", ",", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cc", "=", "ConstantContact", "(", ")", "response", "=", "cc", ".", "delete_email_marketing_campaign", "(", "instance", ")", "response", ".", "raise_for_status", "(", ")" ]
Deletes the CC email marketing campaign associated with me.
[ "Deletes", "the", "CC", "email", "marketing", "campaign", "associated", "with", "me", "." ]
train
https://github.com/AASHE/django-constant-contact/blob/2a37f00ee62531804414b35637d0dad5992d5822/django_constant_contact/models.py#L222-L227
OLC-Bioinformatics/sipprverse
sixteenS/sixteenS_probes.py
SixteenS.runner
def runner(self): """ Run the necessary methods in the correct order """ printtime('Starting {} analysis pipeline'.format(self.analysistype), self.starttime) if not self.pipeline: # If the metadata has been passed from the method script, self.pipeline must still be false in order to # get Sippr() to function correctly, but the metadata shouldn't be recreated try: _ = vars(self.runmetadata)['samples'] except KeyError: # Create the objects to be used in the analyses objects = Objectprep(self) objects.objectprep() self.runmetadata = objects.samples # Run the analyses # Sippr(self, self.cutoff) ProbeSippr(self, self.cutoff) # # self.attributer() # Create the reports # self.sipprverse_reporter() # Print the metadata printer = MetadataPrinter(self) printer.printmetadata() quit()
python
def runner(self): """ Run the necessary methods in the correct order """ printtime('Starting {} analysis pipeline'.format(self.analysistype), self.starttime) if not self.pipeline: # If the metadata has been passed from the method script, self.pipeline must still be false in order to # get Sippr() to function correctly, but the metadata shouldn't be recreated try: _ = vars(self.runmetadata)['samples'] except KeyError: # Create the objects to be used in the analyses objects = Objectprep(self) objects.objectprep() self.runmetadata = objects.samples # Run the analyses # Sippr(self, self.cutoff) ProbeSippr(self, self.cutoff) # # self.attributer() # Create the reports # self.sipprverse_reporter() # Print the metadata printer = MetadataPrinter(self) printer.printmetadata() quit()
[ "def", "runner", "(", "self", ")", ":", "printtime", "(", "'Starting {} analysis pipeline'", ".", "format", "(", "self", ".", "analysistype", ")", ",", "self", ".", "starttime", ")", "if", "not", "self", ".", "pipeline", ":", "# If the metadata has been passed from the method script, self.pipeline must still be false in order to", "# get Sippr() to function correctly, but the metadata shouldn't be recreated", "try", ":", "_", "=", "vars", "(", "self", ".", "runmetadata", ")", "[", "'samples'", "]", "except", "KeyError", ":", "# Create the objects to be used in the analyses", "objects", "=", "Objectprep", "(", "self", ")", "objects", ".", "objectprep", "(", ")", "self", ".", "runmetadata", "=", "objects", ".", "samples", "# Run the analyses", "# Sippr(self, self.cutoff)", "ProbeSippr", "(", "self", ",", "self", ".", "cutoff", ")", "#", "# self.attributer()", "# Create the reports", "# self.sipprverse_reporter()", "# Print the metadata", "printer", "=", "MetadataPrinter", "(", "self", ")", "printer", ".", "printmetadata", "(", ")", "quit", "(", ")" ]
Run the necessary methods in the correct order
[ "Run", "the", "necessary", "methods", "in", "the", "correct", "order" ]
train
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/sixteenS/sixteenS_probes.py#L418-L443
JeromeParadis/django-mailing
mailing/mail.py
send_email
def send_email(recipients, subject, text_content=None, html_content=None, from_email=None, use_base_template=True, category=None, fail_silently=False, language=None, cc=None, bcc=None, attachments=None, headers=None, bypass_queue=False, bypass_hijacking=False, attach_files=None): """ Will send a multi-format email to recipients. Email may be queued through celery """ from django.conf import settings if not bypass_queue and hasattr(settings, 'MAILING_USE_CELERY') and settings.MAILING_USE_CELERY: from celery.execute import send_task return send_task('mailing.queue_send_email',[recipients, subject, text_content, html_content, from_email, use_base_template, category, fail_silently, language if language else translation.get_language(), cc, bcc, attachments, headers, bypass_hijacking, attach_files]) else: header_category_value = '%s%s' % (settings.MAILING_HEADER_CATEGORY_PREFIX if hasattr(settings, 'MAILING_HEADER_CATEGORY_PREFIX') else '', category) # Check for sendgrid support and add category header # -------------------------------- if hasattr(settings, 'MAILING_USE_SENDGRID'): send_grid_support = settings.MAILING_USE_SENDGRID else: send_grid_support = False if not headers: headers = dict() if send_grid_support and category: headers['X-SMTPAPI'] = '{"category": "%s"}' % header_category_value # Check for Mailgun support and add label header # -------------------------------- if hasattr(settings, 'MAILING_USE_MAILGUN'): mailgun_support = settings.MAILING_USE_MAILGUN else: mailgun_support = False if not headers: headers = dict() if mailgun_support and category: headers['X-Mailgun-Tag'] = header_category_value # Ensure recipients are in a list # -------------------------------- if isinstance(recipients, basestring): recipients_list = [recipients] else: recipients_list = recipients # Check if we need to hijack the email # -------------------------------- if hasattr(settings, 'MAILING_MAILTO_HIJACK') and not bypass_hijacking: headers['X-MAILER-ORIGINAL-MAILTO'] = 
','.join(recipients_list) recipients_list = [settings.MAILING_MAILTO_HIJACK] if not subject: raise MailerMissingSubjectError('Subject not supplied') # Send ascii, html or multi-part email # -------------------------------- if text_content or html_content: if use_base_template: prev_language = translation.get_language() language and translation.activate(language) text_content = render_to_string('mailing/base.txt', {'mailing_text_body': text_content, 'mailing_subject': subject, 'settings': settings}) if text_content else None html_content = render_to_string('mailing/base.html', {'mailing_html_body': html_content, 'mailing_subject': subject, 'settings': settings}) if html_content else None translation.activate(prev_language) msg = EmailMultiAlternatives(subject, text_content if text_content else html_content, from_email if from_email else settings.DEFAULT_FROM_EMAIL, recipients_list, cc=cc, bcc=bcc, attachments=attachments, headers = headers) if html_content and text_content: msg.attach_alternative(html_content, "text/html") elif html_content: # Only HTML msg.content_subtype = "html" # Attach files through attach_files helper # -------------------------------- if attach_files: for att in attach_files: # attachments are tuples of (filepath, mimetype, filename) with open(att[0], 'rb') as f: content = f.read() msg.attach(att[2], content, att[1]) # Send email # -------------------------------- msg.send(fail_silently=fail_silently) else: raise MailerInvalidBodyError('No text or html body supplied.')
python
def send_email(recipients, subject, text_content=None, html_content=None, from_email=None, use_base_template=True, category=None, fail_silently=False, language=None, cc=None, bcc=None, attachments=None, headers=None, bypass_queue=False, bypass_hijacking=False, attach_files=None): """ Will send a multi-format email to recipients. Email may be queued through celery """ from django.conf import settings if not bypass_queue and hasattr(settings, 'MAILING_USE_CELERY') and settings.MAILING_USE_CELERY: from celery.execute import send_task return send_task('mailing.queue_send_email',[recipients, subject, text_content, html_content, from_email, use_base_template, category, fail_silently, language if language else translation.get_language(), cc, bcc, attachments, headers, bypass_hijacking, attach_files]) else: header_category_value = '%s%s' % (settings.MAILING_HEADER_CATEGORY_PREFIX if hasattr(settings, 'MAILING_HEADER_CATEGORY_PREFIX') else '', category) # Check for sendgrid support and add category header # -------------------------------- if hasattr(settings, 'MAILING_USE_SENDGRID'): send_grid_support = settings.MAILING_USE_SENDGRID else: send_grid_support = False if not headers: headers = dict() if send_grid_support and category: headers['X-SMTPAPI'] = '{"category": "%s"}' % header_category_value # Check for Mailgun support and add label header # -------------------------------- if hasattr(settings, 'MAILING_USE_MAILGUN'): mailgun_support = settings.MAILING_USE_MAILGUN else: mailgun_support = False if not headers: headers = dict() if mailgun_support and category: headers['X-Mailgun-Tag'] = header_category_value # Ensure recipients are in a list # -------------------------------- if isinstance(recipients, basestring): recipients_list = [recipients] else: recipients_list = recipients # Check if we need to hijack the email # -------------------------------- if hasattr(settings, 'MAILING_MAILTO_HIJACK') and not bypass_hijacking: headers['X-MAILER-ORIGINAL-MAILTO'] = 
','.join(recipients_list) recipients_list = [settings.MAILING_MAILTO_HIJACK] if not subject: raise MailerMissingSubjectError('Subject not supplied') # Send ascii, html or multi-part email # -------------------------------- if text_content or html_content: if use_base_template: prev_language = translation.get_language() language and translation.activate(language) text_content = render_to_string('mailing/base.txt', {'mailing_text_body': text_content, 'mailing_subject': subject, 'settings': settings}) if text_content else None html_content = render_to_string('mailing/base.html', {'mailing_html_body': html_content, 'mailing_subject': subject, 'settings': settings}) if html_content else None translation.activate(prev_language) msg = EmailMultiAlternatives(subject, text_content if text_content else html_content, from_email if from_email else settings.DEFAULT_FROM_EMAIL, recipients_list, cc=cc, bcc=bcc, attachments=attachments, headers = headers) if html_content and text_content: msg.attach_alternative(html_content, "text/html") elif html_content: # Only HTML msg.content_subtype = "html" # Attach files through attach_files helper # -------------------------------- if attach_files: for att in attach_files: # attachments are tuples of (filepath, mimetype, filename) with open(att[0], 'rb') as f: content = f.read() msg.attach(att[2], content, att[1]) # Send email # -------------------------------- msg.send(fail_silently=fail_silently) else: raise MailerInvalidBodyError('No text or html body supplied.')
[ "def", "send_email", "(", "recipients", ",", "subject", ",", "text_content", "=", "None", ",", "html_content", "=", "None", ",", "from_email", "=", "None", ",", "use_base_template", "=", "True", ",", "category", "=", "None", ",", "fail_silently", "=", "False", ",", "language", "=", "None", ",", "cc", "=", "None", ",", "bcc", "=", "None", ",", "attachments", "=", "None", ",", "headers", "=", "None", ",", "bypass_queue", "=", "False", ",", "bypass_hijacking", "=", "False", ",", "attach_files", "=", "None", ")", ":", "from", "django", ".", "conf", "import", "settings", "if", "not", "bypass_queue", "and", "hasattr", "(", "settings", ",", "'MAILING_USE_CELERY'", ")", "and", "settings", ".", "MAILING_USE_CELERY", ":", "from", "celery", ".", "execute", "import", "send_task", "return", "send_task", "(", "'mailing.queue_send_email'", ",", "[", "recipients", ",", "subject", ",", "text_content", ",", "html_content", ",", "from_email", ",", "use_base_template", ",", "category", ",", "fail_silently", ",", "language", "if", "language", "else", "translation", ".", "get_language", "(", ")", ",", "cc", ",", "bcc", ",", "attachments", ",", "headers", ",", "bypass_hijacking", ",", "attach_files", "]", ")", "else", ":", "header_category_value", "=", "'%s%s'", "%", "(", "settings", ".", "MAILING_HEADER_CATEGORY_PREFIX", "if", "hasattr", "(", "settings", ",", "'MAILING_HEADER_CATEGORY_PREFIX'", ")", "else", "''", ",", "category", ")", "# Check for sendgrid support and add category header\r", "# --------------------------------\r", "if", "hasattr", "(", "settings", ",", "'MAILING_USE_SENDGRID'", ")", ":", "send_grid_support", "=", "settings", ".", "MAILING_USE_SENDGRID", "else", ":", "send_grid_support", "=", "False", "if", "not", "headers", ":", "headers", "=", "dict", "(", ")", "if", "send_grid_support", "and", "category", ":", "headers", "[", "'X-SMTPAPI'", "]", "=", "'{\"category\": \"%s\"}'", "%", "header_category_value", "# Check for Mailgun support and add label header\r", "# 
--------------------------------\r", "if", "hasattr", "(", "settings", ",", "'MAILING_USE_MAILGUN'", ")", ":", "mailgun_support", "=", "settings", ".", "MAILING_USE_MAILGUN", "else", ":", "mailgun_support", "=", "False", "if", "not", "headers", ":", "headers", "=", "dict", "(", ")", "if", "mailgun_support", "and", "category", ":", "headers", "[", "'X-Mailgun-Tag'", "]", "=", "header_category_value", "# Ensure recipients are in a list\r", "# --------------------------------\r", "if", "isinstance", "(", "recipients", ",", "basestring", ")", ":", "recipients_list", "=", "[", "recipients", "]", "else", ":", "recipients_list", "=", "recipients", "# Check if we need to hijack the email\r", "# --------------------------------\r", "if", "hasattr", "(", "settings", ",", "'MAILING_MAILTO_HIJACK'", ")", "and", "not", "bypass_hijacking", ":", "headers", "[", "'X-MAILER-ORIGINAL-MAILTO'", "]", "=", "','", ".", "join", "(", "recipients_list", ")", "recipients_list", "=", "[", "settings", ".", "MAILING_MAILTO_HIJACK", "]", "if", "not", "subject", ":", "raise", "MailerMissingSubjectError", "(", "'Subject not supplied'", ")", "# Send ascii, html or multi-part email\r", "# --------------------------------\r", "if", "text_content", "or", "html_content", ":", "if", "use_base_template", ":", "prev_language", "=", "translation", ".", "get_language", "(", ")", "language", "and", "translation", ".", "activate", "(", "language", ")", "text_content", "=", "render_to_string", "(", "'mailing/base.txt'", ",", "{", "'mailing_text_body'", ":", "text_content", ",", "'mailing_subject'", ":", "subject", ",", "'settings'", ":", "settings", "}", ")", "if", "text_content", "else", "None", "html_content", "=", "render_to_string", "(", "'mailing/base.html'", ",", "{", "'mailing_html_body'", ":", "html_content", ",", "'mailing_subject'", ":", "subject", ",", "'settings'", ":", "settings", "}", ")", "if", "html_content", "else", "None", "translation", ".", "activate", "(", "prev_language", ")", "msg", 
"=", "EmailMultiAlternatives", "(", "subject", ",", "text_content", "if", "text_content", "else", "html_content", ",", "from_email", "if", "from_email", "else", "settings", ".", "DEFAULT_FROM_EMAIL", ",", "recipients_list", ",", "cc", "=", "cc", ",", "bcc", "=", "bcc", ",", "attachments", "=", "attachments", ",", "headers", "=", "headers", ")", "if", "html_content", "and", "text_content", ":", "msg", ".", "attach_alternative", "(", "html_content", ",", "\"text/html\"", ")", "elif", "html_content", ":", "# Only HTML\r", "msg", ".", "content_subtype", "=", "\"html\"", "# Attach files through attach_files helper\r", "# --------------------------------\r", "if", "attach_files", ":", "for", "att", "in", "attach_files", ":", "# attachments are tuples of (filepath, mimetype, filename)\r", "with", "open", "(", "att", "[", "0", "]", ",", "'rb'", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "msg", ".", "attach", "(", "att", "[", "2", "]", ",", "content", ",", "att", "[", "1", "]", ")", "# Send email\r", "# --------------------------------\r", "msg", ".", "send", "(", "fail_silently", "=", "fail_silently", ")", "else", ":", "raise", "MailerInvalidBodyError", "(", "'No text or html body supplied.'", ")" ]
Will send a multi-format email to recipients. Email may be queued through celery
[ "Will", "send", "a", "multi", "-", "format", "email", "to", "recipients", ".", "Email", "may", "be", "queued", "through", "celery" ]
train
https://github.com/JeromeParadis/django-mailing/blob/e1773410ab8ab946caf2847c46e025ba43c1ba6c/mailing/mail.py#L23-L103
Riffstation/Flask-Philo-SQLAlchemy
flask_philo_sqlalchemy/connection.py
ConnectionPool.initialize_connections
def initialize_connections(self, scopefunc=None): """ Initialize a database connection by each connection string defined in the configuration file """ for connection_name, connection_string in\ self.app.config['FLASK_PHILO_SQLALCHEMY'].items(): engine = create_engine(connection_string) session = scoped_session(sessionmaker(), scopefunc=scopefunc) session.configure(bind=engine) self.connections[connection_name] = Connection(engine, session)
python
def initialize_connections(self, scopefunc=None): """ Initialize a database connection by each connection string defined in the configuration file """ for connection_name, connection_string in\ self.app.config['FLASK_PHILO_SQLALCHEMY'].items(): engine = create_engine(connection_string) session = scoped_session(sessionmaker(), scopefunc=scopefunc) session.configure(bind=engine) self.connections[connection_name] = Connection(engine, session)
[ "def", "initialize_connections", "(", "self", ",", "scopefunc", "=", "None", ")", ":", "for", "connection_name", ",", "connection_string", "in", "self", ".", "app", ".", "config", "[", "'FLASK_PHILO_SQLALCHEMY'", "]", ".", "items", "(", ")", ":", "engine", "=", "create_engine", "(", "connection_string", ")", "session", "=", "scoped_session", "(", "sessionmaker", "(", ")", ",", "scopefunc", "=", "scopefunc", ")", "session", ".", "configure", "(", "bind", "=", "engine", ")", "self", ".", "connections", "[", "connection_name", "]", "=", "Connection", "(", "engine", ",", "session", ")" ]
Initialize a database connection by each connection string defined in the configuration file
[ "Initialize", "a", "database", "connection", "by", "each", "connection", "string", "defined", "in", "the", "configuration", "file" ]
train
https://github.com/Riffstation/Flask-Philo-SQLAlchemy/blob/71598bb603b8458a2cf9f7989f71d8f1c77fafb9/flask_philo_sqlalchemy/connection.py#L26-L36
benoitkugler/abstractDataLibrary
pyDLib/Core/groups.py
sortableListe.sort
def sort(self, attribut, order=False): """ Implément un tri par attrbut. :param str attribut: Nom du champ concerné :param bool order: Ordre croissant ou décroissant """ value_default = formats.ASSOCIATION[attribut][3] if type(value_default) is str: # case insensitive sort get = lambda d : (d[attribut] or value_default).casefold() elif type(value_default) is dict: #can't sort dicts def get(d): u = d[attribut] or value_default return [str(u[i]) for i in sorted(u.keys())] else: get = lambda d : d[attribut] or value_default list.sort(self, key=get, reverse=order)
python
def sort(self, attribut, order=False): """ Implément un tri par attrbut. :param str attribut: Nom du champ concerné :param bool order: Ordre croissant ou décroissant """ value_default = formats.ASSOCIATION[attribut][3] if type(value_default) is str: # case insensitive sort get = lambda d : (d[attribut] or value_default).casefold() elif type(value_default) is dict: #can't sort dicts def get(d): u = d[attribut] or value_default return [str(u[i]) for i in sorted(u.keys())] else: get = lambda d : d[attribut] or value_default list.sort(self, key=get, reverse=order)
[ "def", "sort", "(", "self", ",", "attribut", ",", "order", "=", "False", ")", ":", "value_default", "=", "formats", ".", "ASSOCIATION", "[", "attribut", "]", "[", "3", "]", "if", "type", "(", "value_default", ")", "is", "str", ":", "# case insensitive sort", "get", "=", "lambda", "d", ":", "(", "d", "[", "attribut", "]", "or", "value_default", ")", ".", "casefold", "(", ")", "elif", "type", "(", "value_default", ")", "is", "dict", ":", "#can't sort dicts", "def", "get", "(", "d", ")", ":", "u", "=", "d", "[", "attribut", "]", "or", "value_default", "return", "[", "str", "(", "u", "[", "i", "]", ")", "for", "i", "in", "sorted", "(", "u", ".", "keys", "(", ")", ")", "]", "else", ":", "get", "=", "lambda", "d", ":", "d", "[", "attribut", "]", "or", "value_default", "list", ".", "sort", "(", "self", ",", "key", "=", "get", ",", "reverse", "=", "order", ")" ]
Implément un tri par attrbut. :param str attribut: Nom du champ concerné :param bool order: Ordre croissant ou décroissant
[ "Implément", "un", "tri", "par", "attrbut", "." ]
train
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/groups.py#L9-L27