| column | type | lengths / range |
|---|---|---|
| id | int32 | 0 – 252k |
| repo | string | 7 – 55 |
| path | string | 4 – 127 |
| func_name | string | 1 – 88 |
| original_string | string | 75 – 19.8k |
| language | string (class) | 1 distinct value |
| code | string | 75 – 19.8k |
| code_tokens | list | |
| docstring | string | 3 – 17.3k |
| docstring_tokens | list | |
| sha | string | 40 |
| url | string | 87 – 242 |
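
Each record carries the twelve fields listed above. As a minimal sketch of one record's shape (field names taken from the schema table; example values abbreviated from row 18,400 below, with long bodies elided by `...`), a record can be read as a plain mapping:

```python
# A sketch of one record, using only fields from the schema above.
# Values are abbreviated from row 18,400; "..." marks elided text.
record = {
    "id": 18400,
    "repo": "monarch-initiative/dipper",
    "path": "dipper/sources/FlyBase.py",
    "func_name": "FlyBase._process_pubs",
    "original_string": "def _process_pubs(self, limit): ...",
    "language": "python",
    "code": "def _process_pubs(self, limit): ...",  # identical to original_string
    "code_tokens": ["def", "_process_pubs", "(", "self", "..."],
    "docstring": "Flybase publications. ...",
    "docstring_tokens": ["Flybase", "publications", "."],
    "sha": "24cc80db355bbe15776edc5c7b41e0886959ba41",
    "url": "https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L483-L539",
}
```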

id: 18,400
repo: monarch-initiative/dipper
path: dipper/sources/FlyBase.py
func_name: FlyBase._process_pubs

original_string:

    def _process_pubs(self, limit):
        """
        Flybase publications.
        :param limit:
        :return:
        """
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        model = Model(graph)
        line_counter = 0
        raw = '/'.join((self.rawdir, 'pub'))
        LOG.info("building labels for pubs")
        with open(raw, 'r') as f:
            f.readline()  # read the header row; skip
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            for line in filereader:
                (pub_id, title, volumetitle, volume, series_name, issue, pyear,
                 pages, miniref, type_id, is_obsolete, publisher, pubplace,
                 uniquename) = line
                # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670
                # if self.test_mode is True:
                #     if int(object_key) not in self.test_keys.get('genotype'):
                #         continue
                pub_num = pub_id
                pub_id = 'FlyBase:'+uniquename.strip()
                self.idhash['publication'][pub_num] = pub_id
                # TODO figure out the type of pub by type_id
                if not re.match(r'(FBrf|multi)', uniquename):
                    continue
                line_counter += 1
                reference = Reference(graph, pub_id)
                if title != '':
                    reference.setTitle(title)
                if pyear != '':
                    reference.setYear(str(pyear))
                if miniref != '':
                    reference.setShortCitation(miniref)
                if not self.test_mode and limit is not None and line_counter > limit:
                    pass
                else:
                    if self.test_mode and int(pub_num) not in self.test_keys['pub']:
                        continue
                    if is_obsolete == 't':
                        model.addDeprecatedIndividual(pub_id)
                    else:
                        reference.addRefToGraph()
        return
language: python
docstring:

    Flybase publications.
    :param limit:
    :return:
sha: 24cc80db355bbe15776edc5c7b41e0886959ba41
url: https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L483-L539

id: 18,401
repo: monarch-initiative/dipper
path: dipper/sources/FlyBase.py
func_name: FlyBase._process_environments

original_string:

    def _process_environments(self):
        """
        There's only about 30 environments in which the phenotypes
        are recorded.
        There are no externally accessible identifiers for environments,
        so we make anonymous nodes for now.
        Some of the environments are comprised of >1 of the other environments;
        we do some simple parsing to match the strings of the environmental
        labels to the other atomic components.
        :return:
        """
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        raw = '/'.join((self.rawdir, 'environment'))
        LOG.info("building labels for environment")
        env_parts = {}
        label_map = {}
        env = Environment(graph)
        with open(raw, 'r') as f:
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            f.readline()  # read the header row; skip
            for line in filereader:
                (environment_id, uniquename, description) = line
                # 22 heat sensitive | tetracycline conditional
                environment_num = environment_id
                environment_internal_id = self._makeInternalIdentifier(
                    'environment', environment_num)
                if environment_num not in self.idhash['environment']:
                    self.idhash['environment'][environment_num] = \
                        environment_internal_id
                environment_id = self.idhash['environment'][environment_num]
                environment_label = uniquename
                if environment_label == 'unspecified':
                    environment_label += ' environment'
                env.addEnvironment(environment_id, environment_label)
                self.label_hash[environment_id] = environment_label
                # split up the environment into parts
                # if there's parts, then add them to the hash;
                # we'll match the components in a second pass
                components = re.split(r'\|', uniquename)
                if len(components) > 1:
                    env_parts[environment_id] = components
                else:
                    label_map[environment_label] = environment_id
        # ### end loop through file
        # build the environmental components
        for eid in env_parts:
            eid = eid.strip()
            for e in env_parts[eid]:
                # search for the environmental component by label
                env_id = label_map.get(e.strip())
                env.addComponentToEnvironment(eid, env_id)
        return
language: python
docstring:

    There's only about 30 environments in which the phenotypes
    are recorded.
    There are no externally accessible identifiers for environments,
    so we make anonymous nodes for now.
    Some of the environments are comprised of >1 of the other environments;
    we do some simple parsing to match the strings of the environmental
    labels to the other atomic components.
    :return:
sha: 24cc80db355bbe15776edc5c7b41e0886959ba41
url: https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L542-L604

id: 18,402
repo: monarch-initiative/dipper
path: dipper/sources/FlyBase.py
func_name: FlyBase._process_stock_genotype

original_string:

    def _process_stock_genotype(self, limit):
        """
        The genotypes of the stocks.
        :param limit:
        :return:
        """
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        raw = '/'.join((self.rawdir, 'stock_genotype'))
        LOG.info("processing stock genotype")
        line_counter = 0
        with open(raw, 'r') as f:
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            f.readline()  # read the header row; skip
            for line in filereader:
                (stock_genotype_id, stock_id, genotype_id) = line
                stock_key = stock_id
                stock_id = self.idhash['stock'][stock_key]
                genotype_key = genotype_id
                genotype_id = self.idhash['genotype'][genotype_key]
                if self.test_mode \
                        and int(genotype_key) not in self.test_keys['genotype']:
                    continue
                graph.addTriple(stock_id, self.globaltt['has_genotype'], genotype_id)
                line_counter += 1
                if not self.test_mode and limit is not None and line_counter > limit:
                    break
        return
language: python
docstring:

    The genotypes of the stocks.
    :param limit:
    :return:
sha: 24cc80db355bbe15776edc5c7b41e0886959ba41
url: https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L926-L965

id: 18,403
repo: monarch-initiative/dipper
path: dipper/sources/FlyBase.py
func_name: FlyBase._process_dbxref

original_string:

    def _process_dbxref(self):
        """
        We bring in the dbxref identifiers and store them in a hashmap for
        lookup in other functions.
        Note that some dbxrefs aren't mapped to identifiers.
        For example, 5004018 is mapped to a string,
        "endosome & imaginal disc epithelial cell | somatic clone..."
        In those cases, there just isn't a dbxref that's used
        when referencing with a cvterm; it'll just use the internal key.
        :return:
        """
        raw = '/'.join((self.rawdir, 'dbxref'))
        LOG.info("processing dbxrefs")
        line_counter = 0
        with open(raw, 'r') as f:
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            f.readline()  # read the header row; skip
            for line in filereader:
                (dbxref_id, db_id, accession, version, description, url) = line
                # dbxref_id  db_id  accession  version  description  url
                # 1  2  SO:0000000  ""
                accession = accession.strip()
                db_id = db_id.strip()
                if accession != '' and db_id in self.localtt:
                    # scrub some identifiers here
                    mch = re.match(
                        r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):',
                        accession)
                    if mch:
                        accession = re.sub(mch.group(1)+r'\:', '', accession)
                    elif re.match(
                            r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)',
                            accession):
                        continue
                    elif re.match(r'\:', accession):  # starts with a colon
                        accession = re.sub(r'\:', '', accession)
                    elif re.search(r'\s', accession):
                        # skip anything with a space
                        # LOG.debug(
                        #     'dbxref %s accession has a space: %s', dbxref_id, accession)
                        continue
                    if re.match(r'http', accession):
                        did = accession
                    else:
                        prefix = self.localtt[db_id]
                        did = ':'.join((prefix, accession))
                        if re.search(r'\:', accession) and prefix != 'DOI':
                            LOG.warning('id %s may be malformed; skipping', did)
                    self.dbxrefs[dbxref_id] = {db_id: did}
                elif url != '':
                    self.dbxrefs[dbxref_id] = {db_id: url.strip()}
                else:
                    continue
                # the following are some special cases that we scrub
                if int(db_id) == 2 and accession.strip() == 'transgenic_transposon':
                    # transgenic_transposable_element
                    self.dbxrefs[dbxref_id] = {
                        db_id: self.globaltt['transgenic_transposable_element']}
                line_counter += 1
        return
language: python
docstring:

    We bring in the dbxref identifiers and store them in a hashmap for
    lookup in other functions.
    Note that some dbxrefs aren't mapped to identifiers.
    For example, 5004018 is mapped to a string,
    "endosome & imaginal disc epithelial cell | somatic clone..."
    In those cases, there just isn't a dbxref that's used
    when referencing with a cvterm; it'll just use the internal key.
    :return:
sha: 24cc80db355bbe15776edc5c7b41e0886959ba41
url: https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1033-L1103

id: 18,404
repo: monarch-initiative/dipper
path: dipper/sources/FlyBase.py
func_name: FlyBase._process_phenotype

original_string:

    def _process_phenotype(self, limit):
        """
        Get the phenotypes, and declare the classes.
        If the "observable" is "unspecified", then we assign the phenotype to
        the "cvalue" id; otherwise we convert the phenotype into a
        uberpheno-style identifier, simply based on the anatomical part that's
        affected...that is listed as the observable_id, concatenated with
        the literal "PHENOTYPE"
        Note that some of the phenotypes do not have a dbxref to a FBcv;
        for these cases it will make a node with an anonymous node with an
        internal id like, "_fbcvtermkey100920PHENOTYPE". This is awkward,
        but not sure how else to construct identifiers.
        Maybe they should be fed back into Upheno and then leveraged by FB?
        Note that assay_id is the same for all current items,
        so we do nothing with this.
        :param limit:
        :return:
        """
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        model = Model(graph)
        raw = '/'.join((self.rawdir, 'phenotype'))
        LOG.info("processing phenotype")
        line_counter = 0
        with open(raw, 'r') as f:
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            f.readline()  # read the header row; skip
            for line in filereader:
                (phenotype_id, uniquename, observable_id, attr_id, value,
                 cvalue_id, assay_id) = line
                # 8505  unspecified
                # 20142  mesothoracic leg disc | somatic clone  87719  60468  60468  60468
                # 8507  sex comb | ectopic  88877  60468  60468  60468
                # 8508  tarsal segment  83664  60468  60468  60468
                # 18404  oocyte | oogenesis stage S9  86769  60468  60468  60468
                # for now make these as phenotypic classes
                # will need to dbxref at some point
                phenotype_key = phenotype_id
                phenotype_id = None
                phenotype_internal_id = self._makeInternalIdentifier(
                    'phenotype', phenotype_key)
                phenotype_label = None
                self.label_hash[phenotype_internal_id] = uniquename
                cvterm_id = None
                if observable_id != '' and int(observable_id) == 60468:
                    # undefined - typically these are already phenotypes
                    if cvalue_id in self.idhash['cvterm']:
                        cvterm_id = self.idhash['cvterm'][cvalue_id]
                        phenotype_id = self.idhash['cvterm'][cvalue_id]
                elif observable_id in self.idhash['cvterm']:
                    # observations to anatomical classes
                    cvterm_id = self.idhash['cvterm'][observable_id]
                    phenotype_id = self.idhash['cvterm'][observable_id] + 'PHENOTYPE'
                    if cvterm_id is not None and cvterm_id in self.label_hash:
                        phenotype_label = self.label_hash[cvterm_id]
                        phenotype_label += ' phenotype'
                        self.label_hash[phenotype_id] = phenotype_label
                    else:
                        LOG.info('cvtermid=%s not in label_hash', cvterm_id)
                else:
                    LOG.info(
                        "No observable id or label for %s: %s",
                        phenotype_key, uniquename)
                # TODO store this composite phenotype in some way
                # as a proper class definition?
                self.idhash['phenotype'][phenotype_key] = phenotype_id
                # assay_id is currently only "undefined" key=60468
                if not self.test_mode and\
                        limit is not None and line_counter > limit:
                    pass
                else:
                    if phenotype_id is not None:
                        # assume that these fit into the phenotypic uberpheno
                        # elsewhere
                        model.addClassToGraph(phenotype_id, phenotype_label)
                        line_counter += 1
        return
language: python
docstring:

    Get the phenotypes, and declare the classes.
    If the "observable" is "unspecified", then we assign the phenotype to
    the "cvalue" id; otherwise we convert the phenotype into a
    uberpheno-style identifier, simply based on the anatomical part that's
    affected...that is listed as the observable_id, concatenated with
    the literal "PHENOTYPE"
    Note that some of the phenotypes do not have a dbxref to a FBcv;
    for these cases it will make a node with an anonymous node with an
    internal id like, "_fbcvtermkey100920PHENOTYPE". This is awkward,
    but not sure how else to construct identifiers.
    Maybe they should be fed back into Upheno and then leveraged by FB?
    Note that assay_id is the same for all current items,
    so we do nothing with this.
    :param limit:
    :return:
sha: 24cc80db355bbe15776edc5c7b41e0886959ba41
url: https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1105-L1195

id: 18,405
repo: monarch-initiative/dipper
path: dipper/sources/FlyBase.py
func_name: FlyBase._process_cvterm

original_string:

    def _process_cvterm(self):
        """
        CVterms are the internal identifiers for any controlled vocab
        or ontology term. Many are xrefd to actual ontologies. The actual
        external id is stored in the dbxref table, which we place into
        the internal hashmap for lookup with the cvterm id. The name of
        the external term is stored in the "name" element of this table, and
        we add that to the label hashmap for lookup elsewhere
        :return:
        """
        line_counter = 0
        raw = '/'.join((self.rawdir, 'cvterm'))
        LOG.info("processing cvterms")
        with open(raw, 'r') as f:
            f.readline()  # read the header row; skip
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            for line in filereader:
                line_counter += 1
                (cvterm_id, cv_id, definition, dbxref_id, is_obsolete,
                 is_relationshiptype, name) = line
                # 316  6  1665919  0  0  rRNA_cleavage_snoRNA_primary_transcript
                # 28   5  1663309  0  0  synonym
                # 455  6  1665920  0  0  tmRNA
                # not sure the following is necessary
                # cv_prefixes = {
                #     6 : 'SO',
                #     20: 'FBcv',
                #     28: 'GO',
                #     29: 'GO',
                #     30: 'GO',
                #     31: 'FBcv',  # not actually FBcv - I think FBbt.
                #     32: 'FBdv',
                #     37: 'GO',    # these are relationships
                #     73: 'DOID'
                # }
                # if int(cv_id) not in cv_prefixes:
                #     continue
                cvterm_key = cvterm_id
                cvterm_id = self._makeInternalIdentifier('cvterm', cvterm_key)
                self.label_hash[cvterm_id] = name
                self.idhash['cvterm'][cvterm_key] = cvterm_id
                # look up the dbxref_id for the cvterm
                # hopefully it's one-to-one
                dbxrefs = self.dbxrefs.get(dbxref_id)
                if dbxrefs is not None:
                    if len(dbxrefs) > 1:
                        LOG.info(
                            ">1 dbxref for this cvterm (%s: %s): %s",
                            str(cvterm_id), name, dbxrefs.values())
                    elif len(dbxrefs) == 1:
                        # replace the cvterm with
                        # the dbxref (external) identifier
                        did = dbxrefs.popitem()[1]
                        # get the value
                        self.idhash['cvterm'][cvterm_key] = did
                        # also add the label to the dbxref
                        self.label_hash[did] = name
        return
language: python
docstring:

    CVterms are the internal identifiers for any controlled vocab
    or ontology term. Many are xrefd to actual ontologies. The actual
    external id is stored in the dbxref table, which we place into
    the internal hashmap for lookup with the cvterm id. The name of
    the external term is stored in the "name" element of this table, and
    we add that to the label hashmap for lookup elsewhere
    :return:
sha: 24cc80db355bbe15776edc5c7b41e0886959ba41
url: https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1324-L1389

id: 18,406
repo: monarch-initiative/dipper
path: dipper/sources/FlyBase.py
func_name: FlyBase._process_organisms

original_string:

    def _process_organisms(self, limit):
        """
        The internal identifiers for the organisms in flybase
        :param limit:
        :return:
        """
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        model = Model(graph)
        raw = '/'.join((self.rawdir, 'organism'))
        LOG.info("processing organisms")
        line_counter = 0
        with open(raw, 'r') as f:
            filereader = csv.reader(f, delimiter='\t', quotechar='\"')
            f.readline()  # read the header row; skip
            for line in filereader:
                (organism_id, abbreviation, genus, species, common_name,
                 comment) = line
                # 1  Dmel  Drosophila  melanogaster  fruit fly
                # 2  Comp  Computational  result
                line_counter += 1
                tax_internal_id = self._makeInternalIdentifier('organism', organism_id)
                tax_label = ' '.join((genus, species))
                tax_id = tax_internal_id
                self.idhash['organism'][organism_id] = tax_id
                self.label_hash[tax_id] = tax_label
                # we won't actually add the organism to the graph,
                # unless we actually use it therefore it is added outside of
                # this function
                if self.test_mode and int(organism_id) not in self.test_keys['organism']:
                    continue
                if not self.test_mode and limit is not None and line_counter > limit:
                    pass
                else:
                    model.addClassToGraph(tax_id)
                    for s in [common_name, abbreviation]:
                        if s is not None and s.strip() != '':
                            model.addSynonym(tax_id, s)
                    model.addComment(tax_id, tax_internal_id)
        return
language: python
docstring:

    The internal identifiers for the organisms in flybase
    :param limit:
    :return:
sha: 24cc80db355bbe15776edc5c7b41e0886959ba41
url: https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1792-L1844

id: 18,407
repo: monarch-initiative/dipper
path: dipper/sources/NCBIGene.py
func_name: NCBIGene._add_gene_equivalencies

original_string:

    def _add_gene_equivalencies(self, xrefs, gene_id, taxon):
        """
        Add equivalentClass and sameAs relationships
        Uses external resource map located in
        /resources/clique_leader.yaml to determine
        if an NCBITaxon ID space is a clique leader
        """
        clique_map = self.open_and_parse_yaml(self.resources['clique_leader'])
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        model = Model(graph)
        filter_out = ['Vega', 'IMGT/GENE-DB', 'Araport']
        # deal with the dbxrefs
        # MIM:614444|HGNC:HGNC:16851|Ensembl:ENSG00000136828|HPRD:11479|Vega:OTTHUMG00000020696
        for dbxref in xrefs.strip().split('|'):
            prefix = ':'.join(dbxref.split(':')[:-1]).strip()
            if prefix in self.localtt:
                prefix = self.localtt[prefix]
            dbxref_curie = ':'.join((prefix, dbxref.split(':')[-1]))
            if dbxref_curie is not None and prefix != '':
                if prefix == 'HPRD':  # proteins are not == genes.
                    model.addTriple(
                        gene_id, self.globaltt['has gene product'], dbxref_curie)
                    continue
                # skip some of these for now based on curie prefix
                if prefix in filter_out:
                    continue
                if prefix == 'ENSEMBL':
                    model.addXref(gene_id, dbxref_curie)
                if prefix == 'OMIM':
                    if DipperUtil.is_omim_disease(dbxref_curie):
                        continue
                try:
                    if self.class_or_indiv.get(gene_id) == 'C':
                        model.addEquivalentClass(gene_id, dbxref_curie)
                        if taxon in clique_map:
                            if clique_map[taxon] == prefix:
                                model.makeLeader(dbxref_curie)
                            elif clique_map[taxon] == gene_id.split(':')[0]:
                                model.makeLeader(gene_id)
                    else:
                        model.addSameIndividual(gene_id, dbxref_curie)
                except AssertionError as err:
                    LOG.warning("Error parsing %s: %s", gene_id, err)
        return
language: python
docstring:

    Add equivalentClass and sameAs relationships
    Uses external resource map located in
    /resources/clique_leader.yaml to determine
    if an NCBITaxon ID space is a clique leader
sha: 24cc80db355bbe15776edc5c7b41e0886959ba41
url: https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/NCBIGene.py#L377-L430

id: 18,408
repo: monarch-initiative/dipper
path: dipper/sources/NCBIGene.py
func_name: NCBIGene._get_gene2pubmed

original_string:

    def _get_gene2pubmed(self, limit):
        """
        Loops through the gene2pubmed file and adds a simple triple to say
        that a given publication is_about a gene.
        Publications are added as NamedIndividuals.
        These are filtered on the taxon.
        :param limit:
        :return:
        """
        src_key = 'gene2pubmed'
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        model = Model(graph)
        LOG.info("Processing Gene records")
        line_counter = 0
        myfile = '/'.join((self.rawdir, self.files[src_key]['file']))
        LOG.info("FILE: %s", myfile)
        assoc_counter = 0
        col = self.files[src_key]['columns']
        with gzip.open(myfile, 'rb') as tsv:
            row = tsv.readline().decode().strip().split('\t')
            row[0] = row[0][1:]  # strip comment
            if col != row:
                LOG.info(
                    '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n',
                    src_key, col, row)
            for line in tsv:
                line_counter += 1
                # skip comments
                row = line.decode().strip().split('\t')
                if row[0][0] == '#':
                    continue
                # (tax_num, gene_num, pubmed_num) = line.split('\t')
                # ## set id_filter=None in init if you don't want to have a filter
                # if self.id_filter is not None:
                #     if ((self.id_filter == 'taxids' and \
                #         (int(tax_num) not in self.tax_ids))
                #             or (self.id_filter == 'geneids' and \
                #                 (int(gene_num) not in self.gene_ids))):
                #         continue
                # #### end filter
                gene_num = row[col.index('GeneID')].strip()
                if self.test_mode and int(gene_num) not in self.gene_ids:
                    continue
                tax_num = row[col.index('tax_id')].strip()
                if not self.test_mode and tax_num not in self.tax_ids:
                    continue
                pubmed_num = row[col.index('PubMed_ID')].strip()
                if gene_num == '-' or pubmed_num == '-':
                    continue
                gene_id = ':'.join(('NCBIGene', gene_num))
                pubmed_id = ':'.join(('PMID', pubmed_num))
                if self.class_or_indiv.get(gene_id) == 'C':
                    model.addClassToGraph(gene_id, None)
                else:
                    model.addIndividualToGraph(gene_id, None)
                # add the publication as a NamedIndividual
                # add type publication
                model.addIndividualToGraph(pubmed_id, None, None)
                reference = Reference(
                    graph, pubmed_id, self.globaltt['journal article'])
                reference.addRefToGraph()
                graph.addTriple(
                    pubmed_id, self.globaltt['is_about'], gene_id)
                assoc_counter += 1
                if not self.test_mode and limit is not None and line_counter > limit:
                    break
        LOG.info(
            "Processed %d pub-gene associations", assoc_counter)
        return
language: python
"gene_num",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'GeneID'",
")",
"]",
".",
"strip",
"(",
")",
"if",
"self",
".",
"test_mode",
"and",
"int",
"(",
"gene_num",
")",
"not",
"in",
"self",
".",
"gene_ids",
":",
"continue",
"tax_num",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'tax_id'",
")",
"]",
".",
"strip",
"(",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"tax_num",
"not",
"in",
"self",
".",
"tax_ids",
":",
"continue",
"pubmed_num",
"=",
"row",
"[",
"col",
".",
"index",
"(",
"'PubMed_ID'",
")",
"]",
".",
"strip",
"(",
")",
"if",
"gene_num",
"==",
"'-'",
"or",
"pubmed_num",
"==",
"'-'",
":",
"continue",
"gene_id",
"=",
"':'",
".",
"join",
"(",
"(",
"'NCBIGene'",
",",
"gene_num",
")",
")",
"pubmed_id",
"=",
"':'",
".",
"join",
"(",
"(",
"'PMID'",
",",
"pubmed_num",
")",
")",
"if",
"self",
".",
"class_or_indiv",
".",
"get",
"(",
"gene_id",
")",
"==",
"'C'",
":",
"model",
".",
"addClassToGraph",
"(",
"gene_id",
",",
"None",
")",
"else",
":",
"model",
".",
"addIndividualToGraph",
"(",
"gene_id",
",",
"None",
")",
"# add the publication as a NamedIndividual",
"# add type publication",
"model",
".",
"addIndividualToGraph",
"(",
"pubmed_id",
",",
"None",
",",
"None",
")",
"reference",
"=",
"Reference",
"(",
"graph",
",",
"pubmed_id",
",",
"self",
".",
"globaltt",
"[",
"'journal article'",
"]",
")",
"reference",
".",
"addRefToGraph",
"(",
")",
"graph",
".",
"addTriple",
"(",
"pubmed_id",
",",
"self",
".",
"globaltt",
"[",
"'is_about'",
"]",
",",
"gene_id",
")",
"assoc_counter",
"+=",
"1",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Processed %d pub-gene associations\"",
",",
"assoc_counter",
")",
"return"
] |
Loops through the gene2pubmed file and adds a simple triple to say
that a given publication is_about a gene.
Publications are added as NamedIndividuals.
These are filtered on the taxon.
:param limit:
:return:
|
[
"Loops",
"through",
"the",
"gene2pubmed",
"file",
"and",
"adds",
"a",
"simple",
"triple",
"to",
"say",
"that",
"a",
"given",
"publication",
"is_about",
"a",
"gene",
".",
"Publications",
"are",
"added",
"as",
"NamedIndividuals",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/NCBIGene.py#L516-L598
|
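A brief illustrative sketch of the row[col.index(...)] lookup used in `_get_gene2pubmed` above; the header list and data row below are invented sample values, not taken from the gene2pubmed file.

col = ['tax_id', 'GeneID', 'PubMed_ID']            # expected header order
line = b'9606\t1234\t7654321\n'                    # invented sample row
row = line.decode().strip().split('\t')

gene_num = row[col.index('GeneID')].strip()        # '1234'
pubmed_num = row[col.index('PubMed_ID')].strip()   # '7654321'

gene_id = ':'.join(('NCBIGene', gene_num))         # 'NCBIGene:1234'
pubmed_id = ':'.join(('PMID', pubmed_num))         # 'PMID:7654321'
print(pubmed_id, 'is_about', gene_id)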
18,409
|
monarch-initiative/dipper
|
dipper/sources/OMIM.py
|
OMIM.process_entries
|
def process_entries(
self, omimids, transform, included_fields=None, graph=None, limit=None,
globaltt=None
):
"""
Given a list of omim ids,
this will use the omim API to fetch the entries, according to the
```included_fields``` passed as a parameter.
If a transformation function is supplied,
this will iterate over each entry,
and either add the results to the supplied ```graph```
or will return a set of processed entries that the calling function
can further iterate.
If no ```included_fields``` are provided, this will simply fetch
the basic entry from omim,
which includes an entry's: prefix, mimNumber, status, and titles.
:param omimids: the set of omim entry ids to fetch using their API
:param transform: Function to transform each omim entry when looping
:param included_fields: A set of what fields are required to retrieve
from the API
:param graph: the graph to add the transformed data into
:return:
"""
omimparams = {}
# add the included_fields as parameters
if included_fields is not None and included_fields:
omimparams['include'] = ','.join(included_fields)
processed_entries = list()
# scrub any omim prefixes from the omimids before processing
# cleanomimids = set()
# for omimid in omimids:
# scrubbed = str(omimid).split(':')[-1]
# if re.match(r'^\d+$', str(scrubbed)):
# cleanomimids.update(scrubbed)
# omimids = list(cleanomimids)
cleanomimids = [o.split(':')[-1] for o in omimids]
diff = set(omimids) - set(cleanomimids)
if diff:
LOG.warning('OMIM has %i dirty bits see"\n %s', len(diff), str(diff))
omimids = cleanomimids
else:
cleanomimids = list()
acc = 0 # for counting
# note that you can only do request batches of 20
# see info about "Limits" at http://omim.org/help/api
# TODO 2017 May seems a majority of many groups of 20
# are producing python None for RDF triple Objects
groupsize = 20
if not self.test_mode and limit is not None:
# just in case the limit is larger than the number of records,
maxit = limit
if limit > len(omimids):
maxit = len(omimids)
else:
maxit = len(omimids)
while acc < maxit:
end = min((maxit, acc + groupsize))
# iterate through the omim ids list,
# and fetch from the OMIM api in batches of 20
if self.test_mode:
intersect = list(
set([str(i) for i in self.test_ids]) & set(omimids[acc:end]))
# some of the test ids are in the omimids
if intersect:
LOG.info("found test ids: %s", intersect)
omimparams.update({'mimNumber': ','.join(intersect)})
else:
acc += groupsize
continue
else:
omimparams.update({'mimNumber': ','.join(omimids[acc:end])})
url = OMIMAPI + urllib.parse.urlencode(omimparams)
try:
req = urllib.request.urlopen(url)
except HTTPError as e: # URLError?
LOG.warning('fetching: %s', url)
error_msg = e.read()
if re.search(r'The API key: .* is invalid', str(error_msg)):
msg = "API Key not valid"
raise HTTPError(url, e.code, msg, e.hdrs, e.fp)
LOG.error("Failed with: %s", str(error_msg))
break
resp = req.read().decode()
acc += groupsize
myjson = json.loads(resp)
# snag a copy
with open('./raw/omim/_' + str(acc) + '.json', 'w') as fp:
json.dump(myjson, fp)
entries = myjson['omim']['entryList']
for e in entries:
# apply the data transformation, and save it to the graph
processed_entry = transform(e, graph, globaltt)
if processed_entry is not None:
processed_entries.append(processed_entry)
# ### end iterating over batch of entries
return processed_entries
|
python
|
def process_entries(
self, omimids, transform, included_fields=None, graph=None, limit=None,
globaltt=None
):
"""
Given a list of omim ids,
this will use the omim API to fetch the entries, according to the
```included_fields``` passed as a parameter.
If a transformation function is supplied,
this will iterate over each entry,
and either add the results to the supplied ```graph```
or will return a set of processed entries that the calling function
can further iterate.
If no ```included_fields``` are provided, this will simply fetch
the basic entry from omim,
which includes an entry's: prefix, mimNumber, status, and titles.
:param omimids: the set of omim entry ids to fetch using their API
:param transform: Function to transform each omim entry when looping
:param included_fields: A set of what fields are required to retrieve
from the API
:param graph: the graph to add the transformed data into
:return:
"""
omimparams = {}
# add the included_fields as parameters
if included_fields is not None and included_fields:
omimparams['include'] = ','.join(included_fields)
processed_entries = list()
# scrub any omim prefixes from the omimids before processing
# cleanomimids = set()
# for omimid in omimids:
# scrubbed = str(omimid).split(':')[-1]
# if re.match(r'^\d+$', str(scrubbed)):
# cleanomimids.update(scrubbed)
# omimids = list(cleanomimids)
cleanomimids = [o.split(':')[-1] for o in omimids]
diff = set(omimids) - set(cleanomimids)
if diff:
LOG.warning('OMIM has %i dirty bits see"\n %s', len(diff), str(diff))
omimids = cleanomimids
else:
cleanomimids = list()
acc = 0 # for counting
# note that you can only do request batches of 20
# see info about "Limits" at http://omim.org/help/api
# TODO 2017 May seems a majority of many groups of 20
# are producing python None for RDF triple Objects
groupsize = 20
if not self.test_mode and limit is not None:
# just in case the limit is larger than the number of records,
maxit = limit
if limit > len(omimids):
maxit = len(omimids)
else:
maxit = len(omimids)
while acc < maxit:
end = min((maxit, acc + groupsize))
# iterate through the omim ids list,
# and fetch from the OMIM api in batches of 20
if self.test_mode:
intersect = list(
set([str(i) for i in self.test_ids]) & set(omimids[acc:end]))
# some of the test ids are in the omimids
if intersect:
LOG.info("found test ids: %s", intersect)
omimparams.update({'mimNumber': ','.join(intersect)})
else:
acc += groupsize
continue
else:
omimparams.update({'mimNumber': ','.join(omimids[acc:end])})
url = OMIMAPI + urllib.parse.urlencode(omimparams)
try:
req = urllib.request.urlopen(url)
except HTTPError as e: # URLError?
LOG.warning('fetching: %s', url)
error_msg = e.read()
if re.search(r'The API key: .* is invalid', str(error_msg)):
msg = "API Key not valid"
raise HTTPError(url, e.code, msg, e.hdrs, e.fp)
LOG.error("Failed with: %s", str(error_msg))
break
resp = req.read().decode()
acc += groupsize
myjson = json.loads(resp)
# snag a copy
with open('./raw/omim/_' + str(acc) + '.json', 'w') as fp:
json.dump(myjson, fp)
entries = myjson['omim']['entryList']
for e in entries:
# apply the data transformation, and save it to the graph
processed_entry = transform(e, graph, globaltt)
if processed_entry is not None:
processed_entries.append(processed_entry)
# ### end iterating over batch of entries
return processed_entries
|
[
"def",
"process_entries",
"(",
"self",
",",
"omimids",
",",
"transform",
",",
"included_fields",
"=",
"None",
",",
"graph",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"globaltt",
"=",
"None",
")",
":",
"omimparams",
"=",
"{",
"}",
"# add the included_fields as parameters",
"if",
"included_fields",
"is",
"not",
"None",
"and",
"included_fields",
":",
"omimparams",
"[",
"'include'",
"]",
"=",
"','",
".",
"join",
"(",
"included_fields",
")",
"processed_entries",
"=",
"list",
"(",
")",
"# scrub any omim prefixes from the omimids before processing",
"# cleanomimids = set()",
"# for omimid in omimids:",
"# scrubbed = str(omimid).split(':')[-1]",
"# if re.match(r'^\\d+$', str(scrubbed)):",
"# cleanomimids.update(scrubbed)",
"# omimids = list(cleanomimids)",
"cleanomimids",
"=",
"[",
"o",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
"for",
"o",
"in",
"omimids",
"]",
"diff",
"=",
"set",
"(",
"omimids",
")",
"-",
"set",
"(",
"cleanomimids",
")",
"if",
"diff",
":",
"LOG",
".",
"warning",
"(",
"'OMIM has %i dirty bits see\"\\n %s'",
",",
"len",
"(",
"diff",
")",
",",
"str",
"(",
"diff",
")",
")",
"omimids",
"=",
"cleanomimids",
"else",
":",
"cleanomimids",
"=",
"list",
"(",
")",
"acc",
"=",
"0",
"# for counting",
"# note that you can only do request batches of 20",
"# see info about \"Limits\" at http://omim.org/help/api",
"# TODO 2017 May seems a majority of many groups of 20",
"# are producing python None for RDF triple Objects",
"groupsize",
"=",
"20",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
":",
"# just in case the limit is larger than the number of records,",
"maxit",
"=",
"limit",
"if",
"limit",
">",
"len",
"(",
"omimids",
")",
":",
"maxit",
"=",
"len",
"(",
"omimids",
")",
"else",
":",
"maxit",
"=",
"len",
"(",
"omimids",
")",
"while",
"acc",
"<",
"maxit",
":",
"end",
"=",
"min",
"(",
"(",
"maxit",
",",
"acc",
"+",
"groupsize",
")",
")",
"# iterate through the omim ids list,",
"# and fetch from the OMIM api in batches of 20",
"if",
"self",
".",
"test_mode",
":",
"intersect",
"=",
"list",
"(",
"set",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"self",
".",
"test_ids",
"]",
")",
"&",
"set",
"(",
"omimids",
"[",
"acc",
":",
"end",
"]",
")",
")",
"# some of the test ids are in the omimids",
"if",
"intersect",
":",
"LOG",
".",
"info",
"(",
"\"found test ids: %s\"",
",",
"intersect",
")",
"omimparams",
".",
"update",
"(",
"{",
"'mimNumber'",
":",
"','",
".",
"join",
"(",
"intersect",
")",
"}",
")",
"else",
":",
"acc",
"+=",
"groupsize",
"continue",
"else",
":",
"omimparams",
".",
"update",
"(",
"{",
"'mimNumber'",
":",
"','",
".",
"join",
"(",
"omimids",
"[",
"acc",
":",
"end",
"]",
")",
"}",
")",
"url",
"=",
"OMIMAPI",
"+",
"urllib",
".",
"parse",
".",
"urlencode",
"(",
"omimparams",
")",
"try",
":",
"req",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
")",
"except",
"HTTPError",
"as",
"e",
":",
"# URLError?",
"LOG",
".",
"warning",
"(",
"'fetching: %s'",
",",
"url",
")",
"error_msg",
"=",
"e",
".",
"read",
"(",
")",
"if",
"re",
".",
"search",
"(",
"r'The API key: .* is invalid'",
",",
"str",
"(",
"error_msg",
")",
")",
":",
"msg",
"=",
"\"API Key not valid\"",
"raise",
"HTTPError",
"(",
"url",
",",
"e",
".",
"code",
",",
"msg",
",",
"e",
".",
"hdrs",
",",
"e",
".",
"fp",
")",
"LOG",
".",
"error",
"(",
"\"Failed with: %s\"",
",",
"str",
"(",
"error_msg",
")",
")",
"break",
"resp",
"=",
"req",
".",
"read",
"(",
")",
".",
"decode",
"(",
")",
"acc",
"+=",
"groupsize",
"myjson",
"=",
"json",
".",
"loads",
"(",
"resp",
")",
"# snag a copy",
"with",
"open",
"(",
"'./raw/omim/_'",
"+",
"str",
"(",
"acc",
")",
"+",
"'.json'",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"myjson",
",",
"fp",
")",
"entries",
"=",
"myjson",
"[",
"'omim'",
"]",
"[",
"'entryList'",
"]",
"for",
"e",
"in",
"entries",
":",
"# apply the data transformation, and save it to the graph",
"processed_entry",
"=",
"transform",
"(",
"e",
",",
"graph",
",",
"globaltt",
")",
"if",
"processed_entry",
"is",
"not",
"None",
":",
"processed_entries",
".",
"append",
"(",
"processed_entry",
")",
"# ### end iterating over batch of entries",
"return",
"processed_entries"
] |
Given a list of omim ids,
this will use the omim API to fetch the entries, according to the
```included_fields``` passed as a parameter.
If a transformation function is supplied,
this will iterate over each entry,
and either add the results to the supplied ```graph```
or will return a set of processed entries that the calling function
can further iterate.
If no ```included_fields``` are provided, this will simply fetch
the basic entry from omim,
which includes an entry's: prefix, mimNumber, status, and titles.
:param omimids: the set of omim entry ids to fetch using their API
:param transform: Function to transform each omim entry when looping
:param included_fields: A set of what fields are required to retrieve
from the API
:param graph: the graph to add the transformed data into
:return:
|
[
"Given",
"a",
"list",
"of",
"omim",
"ids",
"this",
"will",
"use",
"the",
"omim",
"API",
"to",
"fetch",
"the",
"entries",
"according",
"to",
"the",
"included_fields",
"passed",
"as",
"a",
"parameter",
".",
"If",
"a",
"transformation",
"function",
"is",
"supplied",
"this",
"will",
"iterate",
"over",
"each",
"entry",
"and",
"either",
"add",
"the",
"results",
"to",
"the",
"supplied",
"graph",
"or",
"will",
"return",
"a",
"set",
"of",
"processed",
"entries",
"that",
"the",
"calling",
"function",
"can",
"further",
"iterate",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIM.py#L253-L367
|
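A self-contained sketch of the batch-of-20 request loop in `process_entries` above. The MIM numbers are invented and OMIMAPI is a placeholder base URL, not the value defined in dipper/sources/OMIM.py.

import urllib.parse

OMIMAPI = 'https://api.omim.org/api/entry?'          # placeholder base URL
omimids = [str(100000 + i) for i in range(45)]       # invented MIM numbers
groupsize = 20                                       # OMIM API batch limit

acc = 0
while acc < len(omimids):
    end = min(len(omimids), acc + groupsize)
    params = {'include': 'all', 'mimNumber': ','.join(omimids[acc:end])}
    url = OMIMAPI + urllib.parse.urlencode(params)
    # a real client would urllib.request.urlopen(url) and parse the JSON here
    print(url)
    acc += groupsize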
18,410
|
monarch-initiative/dipper
|
dipper/sources/OMIM.py
|
OMIM._process_all
|
def _process_all(self, limit):
"""
This takes the list of omim identifiers from the omim.txt.Z file,
and iteratively queries the omim api for the json-formatted data.
This will create OMIM classes, with the label,
definition, and some synonyms.
If an entry is "removed",
it is added as a deprecated class.
If an entry is "moved",
it is deprecated and consider annotations are added.
Additionally, we extract:
*phenotypicSeries ids as superclasses
*equivalent ids for Orphanet and UMLS
If set to testMode,
it will write only those items in the test_ids to the testgraph.
:param limit:
:return:
"""
omimids = self._get_omim_ids()
LOG.info('Have %i omim numbers to fetch records from their API', len(omimids))
LOG.info('Have %i omim types ', len(self.omim_type))
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
tax_label = 'Homo sapiens'
tax_id = self.globaltt[tax_label]
# add genome and taxon
geno.addGenome(tax_id, tax_label) # tax label can get added elsewhere
model.addClassToGraph(tax_id, None) # label added elsewhere
includes = set()
includes.add('all')
self.process_entries(
omimids, self._transform_entry, includes, graph, limit, self.globaltt)
|
python
|
def _process_all(self, limit):
"""
This takes the list of omim identifiers from the omim.txt.Z file,
and iteratively queries the omim api for the json-formatted data.
This will create OMIM classes, with the label,
definition, and some synonyms.
If an entry is "removed",
it is added as a deprecated class.
If an entry is "moved",
it is deprecated and consider annotations are added.
Additionally, we extract:
*phenotypicSeries ids as superclasses
*equivalent ids for Orphanet and UMLS
If set to testMode,
it will write only those items in the test_ids to the testgraph.
:param limit:
:return:
"""
omimids = self._get_omim_ids()
LOG.info('Have %i omim numbers to fetch records from their API', len(omimids))
LOG.info('Have %i omim types ', len(self.omim_type))
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
tax_label = 'Homo sapiens'
tax_id = self.globaltt[tax_label]
# add genome and taxon
geno.addGenome(tax_id, tax_label) # tax label can get added elsewhere
model.addClassToGraph(tax_id, None) # label added elsewhere
includes = set()
includes.add('all')
self.process_entries(
omimids, self._transform_entry, includes, graph, limit, self.globaltt)
|
[
"def",
"_process_all",
"(",
"self",
",",
"limit",
")",
":",
"omimids",
"=",
"self",
".",
"_get_omim_ids",
"(",
")",
"LOG",
".",
"info",
"(",
"'Have %i omim numbers to fetch records from their API'",
",",
"len",
"(",
"omimids",
")",
")",
"LOG",
".",
"info",
"(",
"'Have %i omim types '",
",",
"len",
"(",
"self",
".",
"omim_type",
")",
")",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"model",
"=",
"Model",
"(",
"graph",
")",
"tax_label",
"=",
"'Homo sapiens'",
"tax_id",
"=",
"self",
".",
"globaltt",
"[",
"tax_label",
"]",
"# add genome and taxon",
"geno",
".",
"addGenome",
"(",
"tax_id",
",",
"tax_label",
")",
"# tax label can get added elsewhere",
"model",
".",
"addClassToGraph",
"(",
"tax_id",
",",
"None",
")",
"# label added elsewhere",
"includes",
"=",
"set",
"(",
")",
"includes",
".",
"add",
"(",
"'all'",
")",
"self",
".",
"process_entries",
"(",
"omimids",
",",
"self",
".",
"_transform_entry",
",",
"includes",
",",
"graph",
",",
"limit",
",",
"self",
".",
"globaltt",
")"
] |
This takes the list of omim identifiers from the omim.txt.Z file,
and iteratively queries the omim api for the json-formatted data.
This will create OMIM classes, with the label,
definition, and some synonyms.
If an entry is "removed",
it is added as a deprecated class.
If an entry is "moved",
it is deprecated and consider annotations are added.
Additionally, we extract:
*phenotypicSeries ids as superclasses
*equivalent ids for Orphanet and UMLS
If set to testMode,
it will write only those items in the test_ids to the testgraph.
:param limit:
:return:
|
[
"This",
"takes",
"the",
"list",
"of",
"omim",
"identifiers",
"from",
"the",
"omim",
".",
"txt",
".",
"Z",
"file",
"and",
"iteratively",
"queries",
"the",
"omim",
"api",
"for",
"the",
"json",
"-",
"formatted",
"data",
".",
"This",
"will",
"create",
"OMIM",
"classes",
"with",
"the",
"label",
"definition",
"and",
"some",
"synonyms",
".",
"If",
"an",
"entry",
"is",
"removed",
"it",
"is",
"added",
"as",
"a",
"deprecated",
"class",
".",
"If",
"an",
"entry",
"is",
"moved",
"it",
"is",
"deprecated",
"and",
"consider",
"annotations",
"are",
"added",
"."
] |
24cc80db355bbe15776edc5c7b41e0886959ba41
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIM.py#L369-L412
|
18,411
|
ethereum/py-trie
|
trie/smt.py
|
SparseMerkleProof.update
|
def update(self, key: bytes, value: bytes, node_updates: Sequence[Hash32]):
"""
Merge an update for another key with the one we are tracking internally.
:param key: keypath of the update we are processing
:param value: value of the update we are processing
:param node_updates: sequence of sibling nodes (in root->leaf order)
must be at least as large as the first diverging
key in the keypath
"""
validate_is_bytes(key)
validate_length(key, self._key_size)
# Path diff is the logical XOR of the updated key and this account
path_diff = (to_int(self.key) ^ to_int(key))
# Same key (diff of 0), update the tracked value
if path_diff == 0:
self._value = value
# No need to update branch
else:
# Find the first mismatched bit between keypaths. This is
# where the branch point occurs, and we should update the
# sibling node in the source branch at the branch point.
# NOTE: Keys are in MSB->LSB (root->leaf) order.
# Node lists are in root->leaf order.
# Be sure to convert between them effectively.
for bit in reversed(range(self._branch_size)):
if path_diff & (1 << bit) > 0:
branch_point = (self._branch_size - 1) - bit
break
# NOTE: node_updates only has to be as long as necessary
# to obtain the update. This allows an optimization
# of pruning updates to the maximum possible depth
# that would be required to update, which may be
# significantly smaller than the tree depth.
if len(node_updates) <= branch_point:
raise ValidationError("Updated node list is not deep enough")
# Update sibling node in the branch where our key differs from the update
self._branch[branch_point] = node_updates[branch_point]
|
python
|
def update(self, key: bytes, value: bytes, node_updates: Sequence[Hash32]):
"""
Merge an update for another key with the one we are tracking internally.
:param key: keypath of the update we are processing
:param value: value of the update we are processing
:param node_updates: sequence of sibling nodes (in root->leaf order)
must be at least as large as the first diverging
key in the keypath
"""
validate_is_bytes(key)
validate_length(key, self._key_size)
# Path diff is the logical XOR of the updated key and this account
path_diff = (to_int(self.key) ^ to_int(key))
# Same key (diff of 0), update the tracked value
if path_diff == 0:
self._value = value
# No need to update branch
else:
# Find the first mismatched bit between keypaths. This is
# where the branch point occurs, and we should update the
# sibling node in the source branch at the branch point.
# NOTE: Keys are in MSB->LSB (root->leaf) order.
# Node lists are in root->leaf order.
# Be sure to convert between them effectively.
for bit in reversed(range(self._branch_size)):
if path_diff & (1 << bit) > 0:
branch_point = (self._branch_size - 1) - bit
break
# NOTE: node_updates only has to be as long as necessary
# to obtain the update. This allows an optimization
# of pruning updates to the maximum possible depth
# that would be required to update, which may be
# significantly smaller than the tree depth.
if len(node_updates) <= branch_point:
raise ValidationError("Updated node list is not deep enough")
# Update sibling node in the branch where our key differs from the update
self._branch[branch_point] = node_updates[branch_point]
|
[
"def",
"update",
"(",
"self",
",",
"key",
":",
"bytes",
",",
"value",
":",
"bytes",
",",
"node_updates",
":",
"Sequence",
"[",
"Hash32",
"]",
")",
":",
"validate_is_bytes",
"(",
"key",
")",
"validate_length",
"(",
"key",
",",
"self",
".",
"_key_size",
")",
"# Path diff is the logical XOR of the updated key and this account",
"path_diff",
"=",
"(",
"to_int",
"(",
"self",
".",
"key",
")",
"^",
"to_int",
"(",
"key",
")",
")",
"# Same key (diff of 0), update the tracked value",
"if",
"path_diff",
"==",
"0",
":",
"self",
".",
"_value",
"=",
"value",
"# No need to update branch",
"else",
":",
"# Find the first mismatched bit between keypaths. This is",
"# where the branch point occurs, and we should update the",
"# sibling node in the source branch at the branch point.",
"# NOTE: Keys are in MSB->LSB (root->leaf) order.",
"# Node lists are in root->leaf order.",
"# Be sure to convert between them effectively.",
"for",
"bit",
"in",
"reversed",
"(",
"range",
"(",
"self",
".",
"_branch_size",
")",
")",
":",
"if",
"path_diff",
"&",
"(",
"1",
"<<",
"bit",
")",
">",
"0",
":",
"branch_point",
"=",
"(",
"self",
".",
"_branch_size",
"-",
"1",
")",
"-",
"bit",
"break",
"# NOTE: node_updates only has to be as long as necessary",
"# to obtain the update. This allows an optimization",
"# of pruning updates to the maximum possible depth",
"# that would be required to update, which may be",
"# significantly smaller than the tree depth.",
"if",
"len",
"(",
"node_updates",
")",
"<=",
"branch_point",
":",
"raise",
"ValidationError",
"(",
"\"Updated node list is not deep enough\"",
")",
"# Update sibling node in the branch where our key differs from the update",
"self",
".",
"_branch",
"[",
"branch_point",
"]",
"=",
"node_updates",
"[",
"branch_point",
"]"
] |
Merge an update for another key with the one we are tracking internally.
:param key: keypath of the update we are processing
:param value: value of the update we are processing
:param node_updates: sequence of sibling nodes (in root->leaf order)
must be at least as large as the first diverging
key in the keypath
|
[
"Merge",
"an",
"update",
"for",
"another",
"key",
"with",
"the",
"one",
"we",
"are",
"tracking",
"internally",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L144-L186
|
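A standalone illustration of the branch-point calculation in `SparseMerkleProof.update` above: XOR the two keypaths and locate the most significant differing bit. The 8-bit keys are toy values chosen for clarity.

def branch_point(key_a: bytes, key_b: bytes, branch_size: int) -> int:
    path_diff = int.from_bytes(key_a, 'big') ^ int.from_bytes(key_b, 'big')
    for bit in reversed(range(branch_size)):
        if path_diff & (1 << bit):
            return (branch_size - 1) - bit   # 0 is the root-most position
    raise ValueError("keys are identical; no diverging bit")

# 0b0110_0001 vs 0b0010_0001 first differ at bit 6 (second bit from the MSB),
# so the branch point in root->leaf order is index 1.
assert branch_point(b'\x61', b'\x21', 8) == 1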
18,412
|
ethereum/py-trie
|
trie/smt.py
|
SparseMerkleTree._get
|
def _get(self, key: bytes) -> Tuple[bytes, Tuple[Hash32]]:
"""
Returns db value and branch in root->leaf order
"""
validate_is_bytes(key)
validate_length(key, self._key_size)
branch = []
target_bit = 1 << (self.depth - 1)
path = to_int(key)
node_hash = self.root_hash
# Append the sibling node to the branch
# Iterate on the parent
for _ in range(self.depth):
node = self.db[node_hash]
left, right = node[:32], node[32:]
if path & target_bit:
branch.append(left)
node_hash = right
else:
branch.append(right)
node_hash = left
target_bit >>= 1
# Value is the last hash in the chain
# NOTE: Didn't do exception here for testing purposes
return self.db[node_hash], tuple(branch)
|
python
|
def _get(self, key: bytes) -> Tuple[bytes, Tuple[Hash32]]:
"""
Returns db value and branch in root->leaf order
"""
validate_is_bytes(key)
validate_length(key, self._key_size)
branch = []
target_bit = 1 << (self.depth - 1)
path = to_int(key)
node_hash = self.root_hash
# Append the sibling node to the branch
# Iterate on the parent
for _ in range(self.depth):
node = self.db[node_hash]
left, right = node[:32], node[32:]
if path & target_bit:
branch.append(left)
node_hash = right
else:
branch.append(right)
node_hash = left
target_bit >>= 1
# Value is the last hash in the chain
# NOTE: Didn't do exception here for testing purposes
return self.db[node_hash], tuple(branch)
|
[
"def",
"_get",
"(",
"self",
",",
"key",
":",
"bytes",
")",
"->",
"Tuple",
"[",
"bytes",
",",
"Tuple",
"[",
"Hash32",
"]",
"]",
":",
"validate_is_bytes",
"(",
"key",
")",
"validate_length",
"(",
"key",
",",
"self",
".",
"_key_size",
")",
"branch",
"=",
"[",
"]",
"target_bit",
"=",
"1",
"<<",
"(",
"self",
".",
"depth",
"-",
"1",
")",
"path",
"=",
"to_int",
"(",
"key",
")",
"node_hash",
"=",
"self",
".",
"root_hash",
"# Append the sibling node to the branch",
"# Iterate on the parent",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"depth",
")",
":",
"node",
"=",
"self",
".",
"db",
"[",
"node_hash",
"]",
"left",
",",
"right",
"=",
"node",
"[",
":",
"32",
"]",
",",
"node",
"[",
"32",
":",
"]",
"if",
"path",
"&",
"target_bit",
":",
"branch",
".",
"append",
"(",
"left",
")",
"node_hash",
"=",
"right",
"else",
":",
"branch",
".",
"append",
"(",
"right",
")",
"node_hash",
"=",
"left",
"target_bit",
">>=",
"1",
"# Value is the last hash in the chain",
"# NOTE: Didn't do exception here for testing purposes",
"return",
"self",
".",
"db",
"[",
"node_hash",
"]",
",",
"tuple",
"(",
"branch",
")"
] |
Returns db value and branch in root->leaf order
|
[
"Returns",
"db",
"value",
"and",
"branch",
"in",
"root",
"-",
">",
"leaf",
"order"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L271-L297
|
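A toy illustration of the MSB-first descent in `_get` above, showing which child is taken at each level for a 4-bit keypath; it uses a plain integer key instead of the node database.

depth = 4
key = 0b1010                      # toy 4-bit keypath
target_bit = 1 << (depth - 1)

directions = []
for _ in range(depth):
    directions.append('right' if key & target_bit else 'left')
    target_bit >>= 1

print(directions)   # ['right', 'left', 'right', 'left']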
18,413
|
ethereum/py-trie
|
trie/smt.py
|
SparseMerkleTree.set
|
def set(self, key: bytes, value: bytes) -> Tuple[Hash32]:
"""
Returns all updated hashes in root->leaf order
"""
validate_is_bytes(key)
validate_length(key, self._key_size)
validate_is_bytes(value)
path = to_int(key)
node = value
_, branch = self._get(key)
proof_update = [] # Keep track of proof updates
target_bit = 1
# branch is in root->leaf order, so flip
for sibling_node in reversed(branch):
# Set
node_hash = keccak(node)
proof_update.append(node_hash)
self.db[node_hash] = node
# Update
if (path & target_bit):
node = sibling_node + node_hash
else:
node = node_hash + sibling_node
target_bit <<= 1
# Finally, update root hash
self.root_hash = keccak(node)
self.db[self.root_hash] = node
# updates need to be in root->leaf order, so flip back
return tuple(reversed(proof_update))
|
python
|
def set(self, key: bytes, value: bytes) -> Tuple[Hash32]:
"""
Returns all updated hashes in root->leaf order
"""
validate_is_bytes(key)
validate_length(key, self._key_size)
validate_is_bytes(value)
path = to_int(key)
node = value
_, branch = self._get(key)
proof_update = [] # Keep track of proof updates
target_bit = 1
# branch is in root->leaf order, so flip
for sibling_node in reversed(branch):
# Set
node_hash = keccak(node)
proof_update.append(node_hash)
self.db[node_hash] = node
# Update
if (path & target_bit):
node = sibling_node + node_hash
else:
node = node_hash + sibling_node
target_bit <<= 1
# Finally, update root hash
self.root_hash = keccak(node)
self.db[self.root_hash] = node
# updates need to be in root->leaf order, so flip back
return tuple(reversed(proof_update))
|
[
"def",
"set",
"(",
"self",
",",
"key",
":",
"bytes",
",",
"value",
":",
"bytes",
")",
"->",
"Tuple",
"[",
"Hash32",
"]",
":",
"validate_is_bytes",
"(",
"key",
")",
"validate_length",
"(",
"key",
",",
"self",
".",
"_key_size",
")",
"validate_is_bytes",
"(",
"value",
")",
"path",
"=",
"to_int",
"(",
"key",
")",
"node",
"=",
"value",
"_",
",",
"branch",
"=",
"self",
".",
"_get",
"(",
"key",
")",
"proof_update",
"=",
"[",
"]",
"# Keep track of proof updates",
"target_bit",
"=",
"1",
"# branch is in root->leaf order, so flip",
"for",
"sibling_node",
"in",
"reversed",
"(",
"branch",
")",
":",
"# Set",
"node_hash",
"=",
"keccak",
"(",
"node",
")",
"proof_update",
".",
"append",
"(",
"node_hash",
")",
"self",
".",
"db",
"[",
"node_hash",
"]",
"=",
"node",
"# Update",
"if",
"(",
"path",
"&",
"target_bit",
")",
":",
"node",
"=",
"sibling_node",
"+",
"node_hash",
"else",
":",
"node",
"=",
"node_hash",
"+",
"sibling_node",
"target_bit",
"<<=",
"1",
"# Finally, update root hash",
"self",
".",
"root_hash",
"=",
"keccak",
"(",
"node",
")",
"self",
".",
"db",
"[",
"self",
".",
"root_hash",
"]",
"=",
"node",
"# updates need to be in root->leaf order, so flip back",
"return",
"tuple",
"(",
"reversed",
"(",
"proof_update",
")",
")"
] |
Returns all updated hashes in root->leaf order
|
[
"Returns",
"all",
"updated",
"hashes",
"in",
"root",
"-",
">",
"leaf",
"order"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L299-L333
|
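A hedged usage sketch for the set/_get/delete API above; the constructor arguments (key_size, default) are assumed from context and have not been verified against trie.smt.

from trie.smt import SparseMerkleTree

smt = SparseMerkleTree(key_size=1, default=b'')   # assumed signature
key = b'\x2a'

proof_update = smt.set(key, b'hello')             # updated hashes, root->leaf
value, branch = smt._get(key)                     # value plus sibling branch
assert value == b'hello'

smt.delete(key)                                   # same as set(key, default)
value, _ = smt._get(key)
assert value == b''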
18,414
|
ethereum/py-trie
|
trie/smt.py
|
SparseMerkleTree.delete
|
def delete(self, key: bytes) -> Tuple[Hash32]:
"""
Equals to setting the value to None
Returns all updated hashes in root->leaf order
"""
validate_is_bytes(key)
validate_length(key, self._key_size)
return self.set(key, self._default)
|
python
|
def delete(self, key: bytes) -> Tuple[Hash32]:
"""
Equals to setting the value to None
Returns all updated hashes in root->leaf order
"""
validate_is_bytes(key)
validate_length(key, self._key_size)
return self.set(key, self._default)
|
[
"def",
"delete",
"(",
"self",
",",
"key",
":",
"bytes",
")",
"->",
"Tuple",
"[",
"Hash32",
"]",
":",
"validate_is_bytes",
"(",
"key",
")",
"validate_length",
"(",
"key",
",",
"self",
".",
"_key_size",
")",
"return",
"self",
".",
"set",
"(",
"key",
",",
"self",
".",
"_default",
")"
] |
Equals to setting the value to None
Returns all updated hashes in root->leaf order
|
[
"Equals",
"to",
"setting",
"the",
"value",
"to",
"None",
"Returns",
"all",
"updated",
"hashes",
"in",
"root",
"-",
">",
"leaf",
"order"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L345-L353
|
18,415
|
ethereum/py-trie
|
trie/sync.py
|
HexaryTrieSync.next_batch
|
def next_batch(self, n=1):
"""Return the next requests that should be dispatched."""
if len(self.queue) == 0:
return []
batch = list(reversed((self.queue[-n:])))
self.queue = self.queue[:-n]
return batch
|
python
|
def next_batch(self, n=1):
"""Return the next requests that should be dispatched."""
if len(self.queue) == 0:
return []
batch = list(reversed((self.queue[-n:])))
self.queue = self.queue[:-n]
return batch
|
[
"def",
"next_batch",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"if",
"len",
"(",
"self",
".",
"queue",
")",
"==",
"0",
":",
"return",
"[",
"]",
"batch",
"=",
"list",
"(",
"reversed",
"(",
"(",
"self",
".",
"queue",
"[",
"-",
"n",
":",
"]",
")",
")",
")",
"self",
".",
"queue",
"=",
"self",
".",
"queue",
"[",
":",
"-",
"n",
"]",
"return",
"batch"
] |
Return the next requests that should be dispatched.
|
[
"Return",
"the",
"next",
"requests",
"that",
"should",
"be",
"dispatched",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L120-L126
|
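A small, invented-data demonstration of the slicing in `next_batch` above: the last n queue entries come back highest-priority first and are removed from the queue.

queue = ['req1', 'req2', 'req3', 'req4']   # lowest priority first
n = 2

batch = list(reversed(queue[-n:]))   # ['req4', 'req3']
queue = queue[:-n]                   # ['req1', 'req2']
print(batch, queue)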
18,416
|
ethereum/py-trie
|
trie/sync.py
|
HexaryTrieSync.schedule
|
def schedule(self, node_key, parent, depth, leaf_callback, is_raw=False):
"""Schedule a request for the node with the given key."""
if node_key in self._existing_nodes:
self.logger.debug("Node %s already exists in db" % encode_hex(node_key))
return
if node_key in self.db:
self._existing_nodes.add(node_key)
self.logger.debug("Node %s already exists in db" % encode_hex(node_key))
return
if parent is not None:
parent.dependencies += 1
existing = self.requests.get(node_key)
if existing is not None:
self.logger.debug(
"Already requesting %s, will just update parents list" % node_key)
existing.parents.append(parent)
return
request = SyncRequest(node_key, parent, depth, leaf_callback, is_raw)
# Requests get added to both self.queue and self.requests; the former is used to keep
# track which requests should be sent next, and the latter is used to avoid scheduling a
# request for a given node multiple times.
self.logger.debug("Scheduling retrieval of %s" % encode_hex(request.node_key))
self.requests[request.node_key] = request
bisect.insort(self.queue, request)
|
python
|
def schedule(self, node_key, parent, depth, leaf_callback, is_raw=False):
"""Schedule a request for the node with the given key."""
if node_key in self._existing_nodes:
self.logger.debug("Node %s already exists in db" % encode_hex(node_key))
return
if node_key in self.db:
self._existing_nodes.add(node_key)
self.logger.debug("Node %s already exists in db" % encode_hex(node_key))
return
if parent is not None:
parent.dependencies += 1
existing = self.requests.get(node_key)
if existing is not None:
self.logger.debug(
"Already requesting %s, will just update parents list" % node_key)
existing.parents.append(parent)
return
request = SyncRequest(node_key, parent, depth, leaf_callback, is_raw)
# Requests get added to both self.queue and self.requests; the former is used to keep
# track which requests should be sent next, and the latter is used to avoid scheduling a
# request for a given node multiple times.
self.logger.debug("Scheduling retrieval of %s" % encode_hex(request.node_key))
self.requests[request.node_key] = request
bisect.insort(self.queue, request)
|
[
"def",
"schedule",
"(",
"self",
",",
"node_key",
",",
"parent",
",",
"depth",
",",
"leaf_callback",
",",
"is_raw",
"=",
"False",
")",
":",
"if",
"node_key",
"in",
"self",
".",
"_existing_nodes",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Node %s already exists in db\"",
"%",
"encode_hex",
"(",
"node_key",
")",
")",
"return",
"if",
"node_key",
"in",
"self",
".",
"db",
":",
"self",
".",
"_existing_nodes",
".",
"add",
"(",
"node_key",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Node %s already exists in db\"",
"%",
"encode_hex",
"(",
"node_key",
")",
")",
"return",
"if",
"parent",
"is",
"not",
"None",
":",
"parent",
".",
"dependencies",
"+=",
"1",
"existing",
"=",
"self",
".",
"requests",
".",
"get",
"(",
"node_key",
")",
"if",
"existing",
"is",
"not",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Already requesting %s, will just update parents list\"",
"%",
"node_key",
")",
"existing",
".",
"parents",
".",
"append",
"(",
"parent",
")",
"return",
"request",
"=",
"SyncRequest",
"(",
"node_key",
",",
"parent",
",",
"depth",
",",
"leaf_callback",
",",
"is_raw",
")",
"# Requests get added to both self.queue and self.requests; the former is used to keep",
"# track which requests should be sent next, and the latter is used to avoid scheduling a",
"# request for a given node multiple times.",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Scheduling retrieval of %s\"",
"%",
"encode_hex",
"(",
"request",
".",
"node_key",
")",
")",
"self",
".",
"requests",
"[",
"request",
".",
"node_key",
"]",
"=",
"request",
"bisect",
".",
"insort",
"(",
"self",
".",
"queue",
",",
"request",
")"
] |
Schedule a request for the node with the given key.
|
[
"Schedule",
"a",
"request",
"for",
"the",
"node",
"with",
"the",
"given",
"key",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L128-L155
|
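A standalone sketch of the bisect.insort call in `schedule` above, with (depth, key) tuples standing in for SyncRequest objects; the real ordering of SyncRequest is defined elsewhere in trie/sync.py.

import bisect

queue = []
for request in [(3, 'c'), (1, 'a'), (2, 'b')]:
    bisect.insort(queue, request)     # keeps the queue sorted on insert

print(queue)        # [(1, 'a'), (2, 'b'), (3, 'c')]
print(queue[-1:])   # what next_batch(1) would hand out next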
18,417
|
ethereum/py-trie
|
trie/sync.py
|
HexaryTrieSync.get_children
|
def get_children(self, request):
"""Return all children of the node retrieved by the given request.
:rtype: A two-tuple with one list containing the children that reference other nodes and
another containing the leaf children.
"""
node = decode_node(request.data)
return _get_children(node, request.depth)
|
python
|
def get_children(self, request):
"""Return all children of the node retrieved by the given request.
:rtype: A two-tuple with one list containing the children that reference other nodes and
another containing the leaf children.
"""
node = decode_node(request.data)
return _get_children(node, request.depth)
|
[
"def",
"get_children",
"(",
"self",
",",
"request",
")",
":",
"node",
"=",
"decode_node",
"(",
"request",
".",
"data",
")",
"return",
"_get_children",
"(",
"node",
",",
"request",
".",
"depth",
")"
] |
Return all children of the node retrieved by the given request.
:rtype: A two-tuple with one list containing the children that reference other nodes and
another containing the leaf children.
|
[
"Return",
"all",
"children",
"of",
"the",
"node",
"retrieved",
"by",
"the",
"given",
"request",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L157-L164
|
18,418
|
ethereum/py-trie
|
trie/sync.py
|
HexaryTrieSync.process
|
def process(self, results):
"""Process request results.
:param results: A list of two-tuples containing the node's key and data.
"""
for node_key, data in results:
request = self.requests.get(node_key)
if request is None:
# This may happen if we resend a request for a node after waiting too long,
# and then eventually get two responses with it.
self.logger.info(
"No SyncRequest found for %s, maybe we got more than one response for it"
% encode_hex(node_key))
return
if request.data is not None:
raise SyncRequestAlreadyProcessed("%s has been processed already" % request)
request.data = data
if request.is_raw:
self.commit(request)
continue
references, leaves = self.get_children(request)
for depth, ref in references:
self.schedule(ref, request, depth, request.leaf_callback)
if request.leaf_callback is not None:
for leaf in leaves:
request.leaf_callback(leaf, request)
if request.dependencies == 0:
self.commit(request)
|
python
|
def process(self, results):
"""Process request results.
:param results: A list of two-tuples containing the node's key and data.
"""
for node_key, data in results:
request = self.requests.get(node_key)
if request is None:
# This may happen if we resend a request for a node after waiting too long,
# and then eventually get two responses with it.
self.logger.info(
"No SyncRequest found for %s, maybe we got more than one response for it"
% encode_hex(node_key))
return
if request.data is not None:
raise SyncRequestAlreadyProcessed("%s has been processed already" % request)
request.data = data
if request.is_raw:
self.commit(request)
continue
references, leaves = self.get_children(request)
for depth, ref in references:
self.schedule(ref, request, depth, request.leaf_callback)
if request.leaf_callback is not None:
for leaf in leaves:
request.leaf_callback(leaf, request)
if request.dependencies == 0:
self.commit(request)
|
[
"def",
"process",
"(",
"self",
",",
"results",
")",
":",
"for",
"node_key",
",",
"data",
"in",
"results",
":",
"request",
"=",
"self",
".",
"requests",
".",
"get",
"(",
"node_key",
")",
"if",
"request",
"is",
"None",
":",
"# This may happen if we resend a request for a node after waiting too long,",
"# and then eventually get two responses with it.",
"self",
".",
"logger",
".",
"info",
"(",
"\"No SyncRequest found for %s, maybe we got more than one response for it\"",
"%",
"encode_hex",
"(",
"node_key",
")",
")",
"return",
"if",
"request",
".",
"data",
"is",
"not",
"None",
":",
"raise",
"SyncRequestAlreadyProcessed",
"(",
"\"%s has been processed already\"",
"%",
"request",
")",
"request",
".",
"data",
"=",
"data",
"if",
"request",
".",
"is_raw",
":",
"self",
".",
"commit",
"(",
"request",
")",
"continue",
"references",
",",
"leaves",
"=",
"self",
".",
"get_children",
"(",
"request",
")",
"for",
"depth",
",",
"ref",
"in",
"references",
":",
"self",
".",
"schedule",
"(",
"ref",
",",
"request",
",",
"depth",
",",
"request",
".",
"leaf_callback",
")",
"if",
"request",
".",
"leaf_callback",
"is",
"not",
"None",
":",
"for",
"leaf",
"in",
"leaves",
":",
"request",
".",
"leaf_callback",
"(",
"leaf",
",",
"request",
")",
"if",
"request",
".",
"dependencies",
"==",
"0",
":",
"self",
".",
"commit",
"(",
"request",
")"
] |
Process request results.
:param results: A list of two-tuples containing the node's key and data.
|
[
"Process",
"request",
"results",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L166-L199
|
18,419
|
ethereum/py-trie
|
trie/branches.py
|
check_if_branch_exist
|
def check_if_branch_exist(db, root_hash, key_prefix):
"""
Given a key prefix, return whether this prefix is
the prefix of an existing key in the trie.
"""
validate_is_bytes(key_prefix)
return _check_if_branch_exist(db, root_hash, encode_to_bin(key_prefix))
|
python
|
def check_if_branch_exist(db, root_hash, key_prefix):
"""
Given a key prefix, return whether this prefix is
the prefix of an existing key in the trie.
"""
validate_is_bytes(key_prefix)
return _check_if_branch_exist(db, root_hash, encode_to_bin(key_prefix))
|
[
"def",
"check_if_branch_exist",
"(",
"db",
",",
"root_hash",
",",
"key_prefix",
")",
":",
"validate_is_bytes",
"(",
"key_prefix",
")",
"return",
"_check_if_branch_exist",
"(",
"db",
",",
"root_hash",
",",
"encode_to_bin",
"(",
"key_prefix",
")",
")"
] |
Given a key prefix, return whether this prefix is
the prefix of an existing key in the trie.
|
[
"Given",
"a",
"key",
"prefix",
"return",
"whether",
"this",
"prefix",
"is",
"the",
"prefix",
"of",
"an",
"existing",
"key",
"in",
"the",
"trie",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L30-L37
|
18,420
|
ethereum/py-trie
|
trie/branches.py
|
get_branch
|
def get_branch(db, root_hash, key):
"""
Get a long-format Merkle branch
"""
validate_is_bytes(key)
return tuple(_get_branch(db, root_hash, encode_to_bin(key)))
|
python
|
def get_branch(db, root_hash, key):
"""
Get a long-format Merkle branch
"""
validate_is_bytes(key)
return tuple(_get_branch(db, root_hash, encode_to_bin(key)))
|
[
"def",
"get_branch",
"(",
"db",
",",
"root_hash",
",",
"key",
")",
":",
"validate_is_bytes",
"(",
"key",
")",
"return",
"tuple",
"(",
"_get_branch",
"(",
"db",
",",
"root_hash",
",",
"encode_to_bin",
"(",
"key",
")",
")",
")"
] |
Get a long-format Merkle branch
|
[
"Get",
"a",
"long",
"-",
"format",
"Merkle",
"branch"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L71-L77
|
18,421
|
ethereum/py-trie
|
trie/branches.py
|
get_witness_for_key_prefix
|
def get_witness_for_key_prefix(db, node_hash, key):
"""
Get all witness given a keypath prefix.
Include
1. witness along the keypath and
2. witness in the subtrie of the last node in keypath
"""
validate_is_bytes(key)
return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key)))
|
python
|
def get_witness_for_key_prefix(db, node_hash, key):
"""
Get all witness given a keypath prefix.
Include
1. witness along the keypath and
2. witness in the subtrie of the last node in keypath
"""
validate_is_bytes(key)
return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key)))
|
[
"def",
"get_witness_for_key_prefix",
"(",
"db",
",",
"node_hash",
",",
"key",
")",
":",
"validate_is_bytes",
"(",
"key",
")",
"return",
"tuple",
"(",
"_get_witness_for_key_prefix",
"(",
"db",
",",
"node_hash",
",",
"encode_to_bin",
"(",
"key",
")",
")",
")"
] |
Get all witness given a keypath prefix.
Include
1. witness along the keypath and
2. witness in the subtrie of the last node in keypath
|
[
"Get",
"all",
"witness",
"given",
"a",
"keypath",
"prefix",
".",
"Include"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L155-L165
|
18,422
|
ethereum/py-trie
|
trie/utils/nodes.py
|
encode_branch_node
|
def encode_branch_node(left_child_node_hash, right_child_node_hash):
"""
Serializes a branch node
"""
validate_is_bytes(left_child_node_hash)
validate_length(left_child_node_hash, 32)
validate_is_bytes(right_child_node_hash)
validate_length(right_child_node_hash, 32)
return BRANCH_TYPE_PREFIX + left_child_node_hash + right_child_node_hash
|
python
|
def encode_branch_node(left_child_node_hash, right_child_node_hash):
"""
Serializes a branch node
"""
validate_is_bytes(left_child_node_hash)
validate_length(left_child_node_hash, 32)
validate_is_bytes(right_child_node_hash)
validate_length(right_child_node_hash, 32)
return BRANCH_TYPE_PREFIX + left_child_node_hash + right_child_node_hash
|
[
"def",
"encode_branch_node",
"(",
"left_child_node_hash",
",",
"right_child_node_hash",
")",
":",
"validate_is_bytes",
"(",
"left_child_node_hash",
")",
"validate_length",
"(",
"left_child_node_hash",
",",
"32",
")",
"validate_is_bytes",
"(",
"right_child_node_hash",
")",
"validate_length",
"(",
"right_child_node_hash",
",",
"32",
")",
"return",
"BRANCH_TYPE_PREFIX",
"+",
"left_child_node_hash",
"+",
"right_child_node_hash"
] |
Serializes a branch node
|
[
"Serializes",
"a",
"branch",
"node"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nodes.py#L157-L165
|
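An illustration of the 65-byte layout enforced by `encode_branch_node` above: a one-byte type prefix followed by two 32-byte child hashes. The prefix value used here is an invented placeholder, not the real BRANCH_TYPE_PREFIX constant.

PREFIX = b'\x00'        # placeholder for BRANCH_TYPE_PREFIX

left = b'\x11' * 32     # toy left child hash
right = b'\x22' * 32    # toy right child hash
node = PREFIX + left + right
assert len(node) == 1 + 32 + 32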
18,423
|
ethereum/py-trie
|
trie/utils/nodes.py
|
encode_leaf_node
|
def encode_leaf_node(value):
"""
Serializes a leaf node
"""
validate_is_bytes(value)
if value is None or value == b'':
raise ValidationError("Value of leaf node can not be empty")
return LEAF_TYPE_PREFIX + value
|
python
|
def encode_leaf_node(value):
"""
Serializes a leaf node
"""
validate_is_bytes(value)
if value is None or value == b'':
raise ValidationError("Value of leaf node can not be empty")
return LEAF_TYPE_PREFIX + value
|
[
"def",
"encode_leaf_node",
"(",
"value",
")",
":",
"validate_is_bytes",
"(",
"value",
")",
"if",
"value",
"is",
"None",
"or",
"value",
"==",
"b''",
":",
"raise",
"ValidationError",
"(",
"\"Value of leaf node can not be empty\"",
")",
"return",
"LEAF_TYPE_PREFIX",
"+",
"value"
] |
Serializes a leaf node
|
[
"Serializes",
"a",
"leaf",
"node"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nodes.py#L168-L175
|
18,424
|
ethereum/py-trie
|
trie/utils/db.py
|
ScratchDB.batch_commit
|
def batch_commit(self, *, do_deletes=False):
'''
Batch and commit and end of context
'''
try:
yield
except Exception as exc:
raise exc
else:
for key, value in self.cache.items():
if value is not DELETED:
self.wrapped_db[key] = value
elif do_deletes:
self.wrapped_db.pop(key, None)
# if do_deletes is False, ignore deletes to underlying db
finally:
self.cache = {}
|
python
|
def batch_commit(self, *, do_deletes=False):
'''
Batch and commit and end of context
'''
try:
yield
except Exception as exc:
raise exc
else:
for key, value in self.cache.items():
if value is not DELETED:
self.wrapped_db[key] = value
elif do_deletes:
self.wrapped_db.pop(key, None)
# if do_deletes is False, ignore deletes to underlying db
finally:
self.cache = {}
|
[
"def",
"batch_commit",
"(",
"self",
",",
"*",
",",
"do_deletes",
"=",
"False",
")",
":",
"try",
":",
"yield",
"except",
"Exception",
"as",
"exc",
":",
"raise",
"exc",
"else",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"cache",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"not",
"DELETED",
":",
"self",
".",
"wrapped_db",
"[",
"key",
"]",
"=",
"value",
"elif",
"do_deletes",
":",
"self",
".",
"wrapped_db",
".",
"pop",
"(",
"key",
",",
"None",
")",
"# if do_deletes is False, ignore deletes to underlying db",
"finally",
":",
"self",
".",
"cache",
"=",
"{",
"}"
] |
Batch and commit and end of context
|
[
"Batch",
"and",
"commit",
"and",
"end",
"of",
"context"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/db.py#L48-L64
|
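A minimal, self-contained sketch of the batch-and-commit pattern shown in `ScratchDB.batch_commit` above, written against a plain dict; the class and sentinel names here are illustrative, not the ones in trie/utils/db.py.

from contextlib import contextmanager

DELETED = object()          # sentinel marking keys to drop on commit

class TinyScratchDB:
    def __init__(self, wrapped_db):
        self.wrapped_db = wrapped_db
        self.cache = {}

    @contextmanager
    def batch_commit(self, *, do_deletes=False):
        try:
            yield
        except Exception:
            raise                                   # write nothing on failure
        else:
            for key, value in self.cache.items():
                if value is not DELETED:
                    self.wrapped_db[key] = value    # flush buffered writes
                elif do_deletes:
                    self.wrapped_db.pop(key, None)  # honour buffered deletes
        finally:
            self.cache = {}                         # always reset the buffer

db = {}
scratch = TinyScratchDB(db)
with scratch.batch_commit():
    scratch.cache[b'key'] = b'value'    # buffered, not yet in db
print(db)                               # {b'key': b'value'} after a clean exit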
18,425
|
ethereum/py-trie
|
trie/hexary.py
|
HexaryTrie._prune_node
|
def _prune_node(self, node):
"""
Prune the given node if context exits cleanly.
"""
if self.is_pruning:
# node is mutable, so capture the key for later pruning now
prune_key, node_body = self._node_to_db_mapping(node)
should_prune = (node_body is not None)
else:
should_prune = False
yield
# Prune only if no exception is raised
if should_prune:
del self.db[prune_key]
|
python
|
def _prune_node(self, node):
"""
Prune the given node if context exits cleanly.
"""
if self.is_pruning:
# node is mutable, so capture the key for later pruning now
prune_key, node_body = self._node_to_db_mapping(node)
should_prune = (node_body is not None)
else:
should_prune = False
yield
# Prune only if no exception is raised
if should_prune:
del self.db[prune_key]
|
[
"def",
"_prune_node",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"is_pruning",
":",
"# node is mutable, so capture the key for later pruning now",
"prune_key",
",",
"node_body",
"=",
"self",
".",
"_node_to_db_mapping",
"(",
"node",
")",
"should_prune",
"=",
"(",
"node_body",
"is",
"not",
"None",
")",
"else",
":",
"should_prune",
"=",
"False",
"yield",
"# Prune only if no exception is raised",
"if",
"should_prune",
":",
"del",
"self",
".",
"db",
"[",
"prune_key",
"]"
] |
Prune the given node if context exits cleanly.
|
[
"Prune",
"the",
"given",
"node",
"if",
"context",
"exits",
"cleanly",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L231-L246
|
18,426
|
ethereum/py-trie
|
trie/hexary.py
|
HexaryTrie._normalize_branch_node
|
def _normalize_branch_node(self, node):
"""
A branch node which is left with only a single non-blank item should be
turned into either a leaf or extension node.
"""
iter_node = iter(node)
if any(iter_node) and any(iter_node):
return node
if node[16]:
return [compute_leaf_key([]), node[16]]
sub_node_idx, sub_node_hash = next(
(idx, v)
for idx, v
in enumerate(node[:16])
if v
)
sub_node = self.get_node(sub_node_hash)
sub_node_type = get_node_type(sub_node)
if sub_node_type in {NODE_TYPE_LEAF, NODE_TYPE_EXTENSION}:
with self._prune_node(sub_node):
new_subnode_key = encode_nibbles(tuple(itertools.chain(
[sub_node_idx],
decode_nibbles(sub_node[0]),
)))
return [new_subnode_key, sub_node[1]]
elif sub_node_type == NODE_TYPE_BRANCH:
subnode_hash = self._persist_node(sub_node)
return [encode_nibbles([sub_node_idx]), subnode_hash]
else:
raise Exception("Invariant: this code block should be unreachable")
|
python
|
def _normalize_branch_node(self, node):
"""
A branch node which is left with only a single non-blank item should be
turned into either a leaf or extension node.
"""
iter_node = iter(node)
if any(iter_node) and any(iter_node):
return node
if node[16]:
return [compute_leaf_key([]), node[16]]
sub_node_idx, sub_node_hash = next(
(idx, v)
for idx, v
in enumerate(node[:16])
if v
)
sub_node = self.get_node(sub_node_hash)
sub_node_type = get_node_type(sub_node)
if sub_node_type in {NODE_TYPE_LEAF, NODE_TYPE_EXTENSION}:
with self._prune_node(sub_node):
new_subnode_key = encode_nibbles(tuple(itertools.chain(
[sub_node_idx],
decode_nibbles(sub_node[0]),
)))
return [new_subnode_key, sub_node[1]]
elif sub_node_type == NODE_TYPE_BRANCH:
subnode_hash = self._persist_node(sub_node)
return [encode_nibbles([sub_node_idx]), subnode_hash]
else:
raise Exception("Invariant: this code block should be unreachable")
|
[
"def",
"_normalize_branch_node",
"(",
"self",
",",
"node",
")",
":",
"iter_node",
"=",
"iter",
"(",
"node",
")",
"if",
"any",
"(",
"iter_node",
")",
"and",
"any",
"(",
"iter_node",
")",
":",
"return",
"node",
"if",
"node",
"[",
"16",
"]",
":",
"return",
"[",
"compute_leaf_key",
"(",
"[",
"]",
")",
",",
"node",
"[",
"16",
"]",
"]",
"sub_node_idx",
",",
"sub_node_hash",
"=",
"next",
"(",
"(",
"idx",
",",
"v",
")",
"for",
"idx",
",",
"v",
"in",
"enumerate",
"(",
"node",
"[",
":",
"16",
"]",
")",
"if",
"v",
")",
"sub_node",
"=",
"self",
".",
"get_node",
"(",
"sub_node_hash",
")",
"sub_node_type",
"=",
"get_node_type",
"(",
"sub_node",
")",
"if",
"sub_node_type",
"in",
"{",
"NODE_TYPE_LEAF",
",",
"NODE_TYPE_EXTENSION",
"}",
":",
"with",
"self",
".",
"_prune_node",
"(",
"sub_node",
")",
":",
"new_subnode_key",
"=",
"encode_nibbles",
"(",
"tuple",
"(",
"itertools",
".",
"chain",
"(",
"[",
"sub_node_idx",
"]",
",",
"decode_nibbles",
"(",
"sub_node",
"[",
"0",
"]",
")",
",",
")",
")",
")",
"return",
"[",
"new_subnode_key",
",",
"sub_node",
"[",
"1",
"]",
"]",
"elif",
"sub_node_type",
"==",
"NODE_TYPE_BRANCH",
":",
"subnode_hash",
"=",
"self",
".",
"_persist_node",
"(",
"sub_node",
")",
"return",
"[",
"encode_nibbles",
"(",
"[",
"sub_node_idx",
"]",
")",
",",
"subnode_hash",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"Invariant: this code block should be unreachable\"",
")"
] |
A branch node which is left with only a single non-blank item should be
turned into either a leaf or extension node.
|
[
"A",
"branch",
"node",
"which",
"is",
"left",
"with",
"only",
"a",
"single",
"non",
"-",
"blank",
"item",
"should",
"be",
"turned",
"into",
"either",
"a",
"leaf",
"or",
"extension",
"node",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L324-L356
|
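The `if any(iter_node) and any(iter_node)` check in `_normalize_branch_node` depends on both `any()` calls consuming the same iterator, so the condition is true only when the branch holds at least two non-blank items. A small sketch of the trick, with a hypothetical helper name:

```python
def has_at_least_two_truthy(items):
    # Both any() calls advance the same iterator; the second call can only
    # succeed if another truthy element remains after the first hit.
    it = iter(items)
    return any(it) and any(it)

assert has_at_least_two_truthy([b'', b'a', 0, b'b']) is True
assert has_at_least_two_truthy([b'', b'a', 0]) is False
```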
18,427
|
ethereum/py-trie
|
trie/hexary.py
|
HexaryTrie._delete_branch_node
|
def _delete_branch_node(self, node, trie_key):
"""
Delete a key from inside or underneath a branch node
"""
if not trie_key:
node[-1] = BLANK_NODE
return self._normalize_branch_node(node)
node_to_delete = self.get_node(node[trie_key[0]])
sub_node = self._delete(node_to_delete, trie_key[1:])
encoded_sub_node = self._persist_node(sub_node)
if encoded_sub_node == node[trie_key[0]]:
return node
node[trie_key[0]] = encoded_sub_node
if encoded_sub_node == BLANK_NODE:
return self._normalize_branch_node(node)
return node
|
python
|
def _delete_branch_node(self, node, trie_key):
"""
Delete a key from inside or underneath a branch node
"""
if not trie_key:
node[-1] = BLANK_NODE
return self._normalize_branch_node(node)
node_to_delete = self.get_node(node[trie_key[0]])
sub_node = self._delete(node_to_delete, trie_key[1:])
encoded_sub_node = self._persist_node(sub_node)
if encoded_sub_node == node[trie_key[0]]:
return node
node[trie_key[0]] = encoded_sub_node
if encoded_sub_node == BLANK_NODE:
return self._normalize_branch_node(node)
return node
|
[
"def",
"_delete_branch_node",
"(",
"self",
",",
"node",
",",
"trie_key",
")",
":",
"if",
"not",
"trie_key",
":",
"node",
"[",
"-",
"1",
"]",
"=",
"BLANK_NODE",
"return",
"self",
".",
"_normalize_branch_node",
"(",
"node",
")",
"node_to_delete",
"=",
"self",
".",
"get_node",
"(",
"node",
"[",
"trie_key",
"[",
"0",
"]",
"]",
")",
"sub_node",
"=",
"self",
".",
"_delete",
"(",
"node_to_delete",
",",
"trie_key",
"[",
"1",
":",
"]",
")",
"encoded_sub_node",
"=",
"self",
".",
"_persist_node",
"(",
"sub_node",
")",
"if",
"encoded_sub_node",
"==",
"node",
"[",
"trie_key",
"[",
"0",
"]",
"]",
":",
"return",
"node",
"node",
"[",
"trie_key",
"[",
"0",
"]",
"]",
"=",
"encoded_sub_node",
"if",
"encoded_sub_node",
"==",
"BLANK_NODE",
":",
"return",
"self",
".",
"_normalize_branch_node",
"(",
"node",
")",
"return",
"node"
] |
Delete a key from inside or underneath a branch node
|
[
"Delete",
"a",
"key",
"from",
"inside",
"or",
"underneath",
"a",
"branch",
"node"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L361-L381
|
18,428
|
ethereum/py-trie
|
trie/binary.py
|
BinaryTrie.get
|
def get(self, key):
"""
Fetches the value with a given keypath from the given node.
Key will be encoded into binary array format first.
"""
validate_is_bytes(key)
return self._get(self.root_hash, encode_to_bin(key))
|
python
|
def get(self, key):
"""
Fetches the value with a given keypath from the given node.
Key will be encoded into binary array format first.
"""
validate_is_bytes(key)
return self._get(self.root_hash, encode_to_bin(key))
|
[
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"validate_is_bytes",
"(",
"key",
")",
"return",
"self",
".",
"_get",
"(",
"self",
".",
"root_hash",
",",
"encode_to_bin",
"(",
"key",
")",
")"
] |
Fetches the value with a given keypath from the given node.
Key will be encoded into binary array format first.
|
[
"Fetches",
"the",
"value",
"with",
"a",
"given",
"keypath",
"from",
"the",
"given",
"node",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L38-L46
|
18,429
|
ethereum/py-trie
|
trie/binary.py
|
BinaryTrie.set
|
def set(self, key, value):
"""
Sets the value at the given keypath from the given node
Key will be encoded into binary array format first.
"""
validate_is_bytes(key)
validate_is_bytes(value)
self.root_hash = self._set(self.root_hash, encode_to_bin(key), value)
|
python
|
def set(self, key, value):
"""
Sets the value at the given keypath from the given node
Key will be encoded into binary array format first.
"""
validate_is_bytes(key)
validate_is_bytes(value)
self.root_hash = self._set(self.root_hash, encode_to_bin(key), value)
|
[
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"validate_is_bytes",
"(",
"key",
")",
"validate_is_bytes",
"(",
"value",
")",
"self",
".",
"root_hash",
"=",
"self",
".",
"_set",
"(",
"self",
".",
"root_hash",
",",
"encode_to_bin",
"(",
"key",
")",
",",
"value",
")"
] |
Sets the value at the given keypath from the given node
Key will be encoded into binary array format first.
|
[
"Sets",
"the",
"value",
"at",
"the",
"given",
"keypath",
"from",
"the",
"given",
"node"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L79-L88
|
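A hedged round trip through the `get`/`set`/`delete` methods shown in these rows; the `BinaryTrie(db=...)` constructor call and the blank default root are assumptions based on typical py-trie usage and are not part of the snippets above:

```python
from trie import BinaryTrie  # assumed import path for the class shown above

t = BinaryTrie(db={})         # assumed constructor signature
t.set(b'my-key', b'my-value')
assert t.get(b'my-key') == b'my-value'
t.delete(b'my-key')           # implemented as writing an empty value (see delete below)
```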
18,430
|
ethereum/py-trie
|
trie/binary.py
|
BinaryTrie._set
|
def _set(self, node_hash, keypath, value, if_delete_subtrie=False):
"""
If if_delete_subtrie is set to True, what it will do is that it take in a keypath
and traverse til the end of keypath, then delete the whole subtrie of that node.
Note: keypath should be in binary array format, i.e., encoded by encode_to_bin()
"""
# Empty trie
if node_hash == BLANK_HASH:
if value:
return self._hash_and_save(
encode_kv_node(keypath, self._hash_and_save(encode_leaf_node(value)))
)
else:
return BLANK_HASH
nodetype, left_child, right_child = parse_node(self.db[node_hash])
# Node is a leaf node
if nodetype == LEAF_TYPE:
# Keypath must match, there should be no remaining keypath
if keypath:
raise NodeOverrideError(
"Fail to set the value because the prefix of it's key"
" is the same as existing key")
if if_delete_subtrie:
return BLANK_HASH
return self._hash_and_save(encode_leaf_node(value)) if value else BLANK_HASH
# node is a key-value node
elif nodetype == KV_TYPE:
# Keypath too short
if not keypath:
if if_delete_subtrie:
return BLANK_HASH
else:
raise NodeOverrideError(
"Fail to set the value because it's key"
" is the prefix of other existing key")
return self._set_kv_node(
keypath,
node_hash,
nodetype,
left_child,
right_child,
value,
if_delete_subtrie
)
# node is a branch node
elif nodetype == BRANCH_TYPE:
# Keypath too short
if not keypath:
if if_delete_subtrie:
return BLANK_HASH
else:
raise NodeOverrideError(
"Fail to set the value because it's key"
" is the prefix of other existing key")
return self._set_branch_node(
keypath,
nodetype,
left_child,
right_child,
value,
if_delete_subtrie
)
raise Exception("Invariant: This shouldn't ever happen")
|
python
|
def _set(self, node_hash, keypath, value, if_delete_subtrie=False):
"""
If if_delete_subtrie is set to True, what it will do is that it take in a keypath
and traverse til the end of keypath, then delete the whole subtrie of that node.
Note: keypath should be in binary array format, i.e., encoded by encode_to_bin()
"""
# Empty trie
if node_hash == BLANK_HASH:
if value:
return self._hash_and_save(
encode_kv_node(keypath, self._hash_and_save(encode_leaf_node(value)))
)
else:
return BLANK_HASH
nodetype, left_child, right_child = parse_node(self.db[node_hash])
# Node is a leaf node
if nodetype == LEAF_TYPE:
# Keypath must match, there should be no remaining keypath
if keypath:
raise NodeOverrideError(
"Fail to set the value because the prefix of it's key"
" is the same as existing key")
if if_delete_subtrie:
return BLANK_HASH
return self._hash_and_save(encode_leaf_node(value)) if value else BLANK_HASH
# node is a key-value node
elif nodetype == KV_TYPE:
# Keypath too short
if not keypath:
if if_delete_subtrie:
return BLANK_HASH
else:
raise NodeOverrideError(
"Fail to set the value because it's key"
" is the prefix of other existing key")
return self._set_kv_node(
keypath,
node_hash,
nodetype,
left_child,
right_child,
value,
if_delete_subtrie
)
# node is a branch node
elif nodetype == BRANCH_TYPE:
# Keypath too short
if not keypath:
if if_delete_subtrie:
return BLANK_HASH
else:
raise NodeOverrideError(
"Fail to set the value because it's key"
" is the prefix of other existing key")
return self._set_branch_node(
keypath,
nodetype,
left_child,
right_child,
value,
if_delete_subtrie
)
raise Exception("Invariant: This shouldn't ever happen")
|
[
"def",
"_set",
"(",
"self",
",",
"node_hash",
",",
"keypath",
",",
"value",
",",
"if_delete_subtrie",
"=",
"False",
")",
":",
"# Empty trie",
"if",
"node_hash",
"==",
"BLANK_HASH",
":",
"if",
"value",
":",
"return",
"self",
".",
"_hash_and_save",
"(",
"encode_kv_node",
"(",
"keypath",
",",
"self",
".",
"_hash_and_save",
"(",
"encode_leaf_node",
"(",
"value",
")",
")",
")",
")",
"else",
":",
"return",
"BLANK_HASH",
"nodetype",
",",
"left_child",
",",
"right_child",
"=",
"parse_node",
"(",
"self",
".",
"db",
"[",
"node_hash",
"]",
")",
"# Node is a leaf node",
"if",
"nodetype",
"==",
"LEAF_TYPE",
":",
"# Keypath must match, there should be no remaining keypath",
"if",
"keypath",
":",
"raise",
"NodeOverrideError",
"(",
"\"Fail to set the value because the prefix of it's key\"",
"\" is the same as existing key\"",
")",
"if",
"if_delete_subtrie",
":",
"return",
"BLANK_HASH",
"return",
"self",
".",
"_hash_and_save",
"(",
"encode_leaf_node",
"(",
"value",
")",
")",
"if",
"value",
"else",
"BLANK_HASH",
"# node is a key-value node",
"elif",
"nodetype",
"==",
"KV_TYPE",
":",
"# Keypath too short",
"if",
"not",
"keypath",
":",
"if",
"if_delete_subtrie",
":",
"return",
"BLANK_HASH",
"else",
":",
"raise",
"NodeOverrideError",
"(",
"\"Fail to set the value because it's key\"",
"\" is the prefix of other existing key\"",
")",
"return",
"self",
".",
"_set_kv_node",
"(",
"keypath",
",",
"node_hash",
",",
"nodetype",
",",
"left_child",
",",
"right_child",
",",
"value",
",",
"if_delete_subtrie",
")",
"# node is a branch node",
"elif",
"nodetype",
"==",
"BRANCH_TYPE",
":",
"# Keypath too short",
"if",
"not",
"keypath",
":",
"if",
"if_delete_subtrie",
":",
"return",
"BLANK_HASH",
"else",
":",
"raise",
"NodeOverrideError",
"(",
"\"Fail to set the value because it's key\"",
"\" is the prefix of other existing key\"",
")",
"return",
"self",
".",
"_set_branch_node",
"(",
"keypath",
",",
"nodetype",
",",
"left_child",
",",
"right_child",
",",
"value",
",",
"if_delete_subtrie",
")",
"raise",
"Exception",
"(",
"\"Invariant: This shouldn't ever happen\"",
")"
] |
If if_delete_subtrie is set to True, what it will do is that it take in a keypath
and traverse til the end of keypath, then delete the whole subtrie of that node.
Note: keypath should be in binary array format, i.e., encoded by encode_to_bin()
|
[
"If",
"if_delete_subtrie",
"is",
"set",
"to",
"True",
"what",
"it",
"will",
"do",
"is",
"that",
"it",
"take",
"in",
"a",
"keypath",
"and",
"traverse",
"til",
"the",
"end",
"of",
"keypath",
"then",
"delete",
"the",
"whole",
"subtrie",
"of",
"that",
"node",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L90-L153
|
18,431
|
ethereum/py-trie
|
trie/binary.py
|
BinaryTrie.delete
|
def delete(self, key):
"""
Equals to setting the value to None
"""
validate_is_bytes(key)
self.root_hash = self._set(self.root_hash, encode_to_bin(key), b'')
|
python
|
def delete(self, key):
"""
Equals to setting the value to None
"""
validate_is_bytes(key)
self.root_hash = self._set(self.root_hash, encode_to_bin(key), b'')
|
[
"def",
"delete",
"(",
"self",
",",
"key",
")",
":",
"validate_is_bytes",
"(",
"key",
")",
"self",
".",
"root_hash",
"=",
"self",
".",
"_set",
"(",
"self",
".",
"root_hash",
",",
"encode_to_bin",
"(",
"key",
")",
",",
"b''",
")"
] |
Equals to setting the value to None
|
[
"Equals",
"to",
"setting",
"the",
"value",
"to",
"None"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L297-L303
|
18,432
|
ethereum/py-trie
|
trie/binary.py
|
BinaryTrie.delete_subtrie
|
def delete_subtrie(self, key):
"""
Given a key prefix, delete the whole subtrie that starts with the key prefix.
Key will be encoded into binary array format first.
It will call `_set` with `if_delete_subtrie` set to True.
"""
validate_is_bytes(key)
self.root_hash = self._set(
self.root_hash,
encode_to_bin(key),
value=b'',
if_delete_subtrie=True,
)
|
python
|
def delete_subtrie(self, key):
"""
Given a key prefix, delete the whole subtrie that starts with the key prefix.
Key will be encoded into binary array format first.
It will call `_set` with `if_delete_subtrie` set to True.
"""
validate_is_bytes(key)
self.root_hash = self._set(
self.root_hash,
encode_to_bin(key),
value=b'',
if_delete_subtrie=True,
)
|
[
"def",
"delete_subtrie",
"(",
"self",
",",
"key",
")",
":",
"validate_is_bytes",
"(",
"key",
")",
"self",
".",
"root_hash",
"=",
"self",
".",
"_set",
"(",
"self",
".",
"root_hash",
",",
"encode_to_bin",
"(",
"key",
")",
",",
"value",
"=",
"b''",
",",
"if_delete_subtrie",
"=",
"True",
",",
")"
] |
Given a key prefix, delete the whole subtrie that starts with the key prefix.
Key will be encoded into binary array format first.
It will call `_set` with `if_delete_subtrie` set to True.
|
[
"Given",
"a",
"key",
"prefix",
"delete",
"the",
"whole",
"subtrie",
"that",
"starts",
"with",
"the",
"key",
"prefix",
"."
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L305-L320
|
18,433
|
ethereum/py-trie
|
trie/binary.py
|
BinaryTrie._hash_and_save
|
def _hash_and_save(self, node):
"""
Saves a node into the database and returns its hash
"""
validate_is_bin_node(node)
node_hash = keccak(node)
self.db[node_hash] = node
return node_hash
|
python
|
def _hash_and_save(self, node):
"""
Saves a node into the database and returns its hash
"""
validate_is_bin_node(node)
node_hash = keccak(node)
self.db[node_hash] = node
return node_hash
|
[
"def",
"_hash_and_save",
"(",
"self",
",",
"node",
")",
":",
"validate_is_bin_node",
"(",
"node",
")",
"node_hash",
"=",
"keccak",
"(",
"node",
")",
"self",
".",
"db",
"[",
"node_hash",
"]",
"=",
"node",
"return",
"node_hash"
] |
Saves a node into the database and returns its hash
|
[
"Saves",
"a",
"node",
"into",
"the",
"database",
"and",
"returns",
"its",
"hash"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L338-L346
|
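`_hash_and_save` is a small content-addressed write: the node body is stored under the hash of its own bytes and that hash is returned as the reference. A dependency-free sketch of the same idea, with `sha256` standing in for `keccak`:

```python
import hashlib

def hash_and_save(db, node):
    # Store the node under the hash of its own bytes and return the key.
    node_hash = hashlib.sha256(node).digest()
    db[node_hash] = node
    return node_hash

db = {}
ref = hash_and_save(db, b'leaf-node-body')
assert db[ref] == b'leaf-node-body'
```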
18,434
|
ethereum/py-trie
|
trie/utils/binaries.py
|
decode_from_bin
|
def decode_from_bin(input_bin):
"""
0100000101010111010000110100100101001001 -> ASCII
"""
for chunk in partition_all(8, input_bin):
yield sum(
2**exp * bit
for exp, bit
in enumerate(reversed(chunk))
)
|
python
|
def decode_from_bin(input_bin):
"""
0100000101010111010000110100100101001001 -> ASCII
"""
for chunk in partition_all(8, input_bin):
yield sum(
2**exp * bit
for exp, bit
in enumerate(reversed(chunk))
)
|
[
"def",
"decode_from_bin",
"(",
"input_bin",
")",
":",
"for",
"chunk",
"in",
"partition_all",
"(",
"8",
",",
"input_bin",
")",
":",
"yield",
"sum",
"(",
"2",
"**",
"exp",
"*",
"bit",
"for",
"exp",
",",
"bit",
"in",
"enumerate",
"(",
"reversed",
"(",
"chunk",
")",
")",
")"
] |
0100000101010111010000110100100101001001 -> ASCII
|
[
"0100000101010111010000110100100101001001",
"-",
">",
"ASCII"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L18-L27
|
18,435
|
ethereum/py-trie
|
trie/utils/binaries.py
|
encode_to_bin
|
def encode_to_bin(value):
"""
ASCII -> 0100000101010111010000110100100101001001
"""
for char in value:
for exp in EXP:
if char & exp:
yield True
else:
yield False
|
python
|
def encode_to_bin(value):
"""
ASCII -> 0100000101010111010000110100100101001001
"""
for char in value:
for exp in EXP:
if char & exp:
yield True
else:
yield False
|
[
"def",
"encode_to_bin",
"(",
"value",
")",
":",
"for",
"char",
"in",
"value",
":",
"for",
"exp",
"in",
"EXP",
":",
"if",
"char",
"&",
"exp",
":",
"yield",
"True",
"else",
":",
"yield",
"False"
] |
ASCII -> 0100000101010111010000110100100101001001
|
[
"ASCII",
"-",
">",
"0100000101010111010000110100100101001001"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L31-L40
|
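A self-contained restatement of the `encode_to_bin`/`decode_from_bin` pair above, to make the MSB-first bit ordering concrete; `partition_all` from the library's dependency is replaced by manual slicing, and the `EXP` table is assumed to be the powers of two from 128 down to 1:

```python
EXP = [2 ** i for i in range(7, -1, -1)]  # assumed: 128, 64, ..., 1 (MSB first)

def to_bits(value):
    for char in value:
        for exp in EXP:
            yield bool(char & exp)

def from_bits(bits):
    bits = list(bits)
    for i in range(0, len(bits), 8):
        chunk = bits[i:i + 8]
        yield sum(2 ** e * bit for e, bit in enumerate(reversed(chunk)))

assert ''.join(str(int(b)) for b in to_bits(b'A')) == '01000001'
assert bytes(from_bits(to_bits(b'ASCII'))) == b'ASCII'
```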
18,436
|
ethereum/py-trie
|
trie/utils/binaries.py
|
encode_from_bin_keypath
|
def encode_from_bin_keypath(input_bin):
"""
Encodes a sequence of 0s and 1s into tightly packed bytes
Used in encoding key path of a KV-NODE
"""
padded_bin = bytes((4 - len(input_bin)) % 4) + input_bin
prefix = TWO_BITS[len(input_bin) % 4]
if len(padded_bin) % 8 == 4:
return decode_from_bin(PREFIX_00 + prefix + padded_bin)
else:
return decode_from_bin(PREFIX_100000 + prefix + padded_bin)
|
python
|
def encode_from_bin_keypath(input_bin):
"""
Encodes a sequence of 0s and 1s into tightly packed bytes
Used in encoding key path of a KV-NODE
"""
padded_bin = bytes((4 - len(input_bin)) % 4) + input_bin
prefix = TWO_BITS[len(input_bin) % 4]
if len(padded_bin) % 8 == 4:
return decode_from_bin(PREFIX_00 + prefix + padded_bin)
else:
return decode_from_bin(PREFIX_100000 + prefix + padded_bin)
|
[
"def",
"encode_from_bin_keypath",
"(",
"input_bin",
")",
":",
"padded_bin",
"=",
"bytes",
"(",
"(",
"4",
"-",
"len",
"(",
"input_bin",
")",
")",
"%",
"4",
")",
"+",
"input_bin",
"prefix",
"=",
"TWO_BITS",
"[",
"len",
"(",
"input_bin",
")",
"%",
"4",
"]",
"if",
"len",
"(",
"padded_bin",
")",
"%",
"8",
"==",
"4",
":",
"return",
"decode_from_bin",
"(",
"PREFIX_00",
"+",
"prefix",
"+",
"padded_bin",
")",
"else",
":",
"return",
"decode_from_bin",
"(",
"PREFIX_100000",
"+",
"prefix",
"+",
"padded_bin",
")"
] |
Encodes a sequence of 0s and 1s into tightly packed bytes
Used in encoding key path of a KV-NODE
|
[
"Encodes",
"a",
"sequence",
"of",
"0s",
"and",
"1s",
"into",
"tightly",
"packed",
"bytes",
"Used",
"in",
"encoding",
"key",
"path",
"of",
"a",
"KV",
"-",
"NODE"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L43-L53
|
18,437
|
ethereum/py-trie
|
trie/utils/binaries.py
|
decode_to_bin_keypath
|
def decode_to_bin_keypath(path):
"""
Decodes bytes into a sequence of 0s and 1s
Used in decoding key path of a KV-NODE
"""
path = encode_to_bin(path)
if path[0] == 1:
path = path[4:]
assert path[0:2] == PREFIX_00
padded_len = TWO_BITS.index(path[2:4])
return path[4+((4 - padded_len) % 4):]
|
python
|
def decode_to_bin_keypath(path):
"""
Decodes bytes into a sequence of 0s and 1s
Used in decoding key path of a KV-NODE
"""
path = encode_to_bin(path)
if path[0] == 1:
path = path[4:]
assert path[0:2] == PREFIX_00
padded_len = TWO_BITS.index(path[2:4])
return path[4+((4 - padded_len) % 4):]
|
[
"def",
"decode_to_bin_keypath",
"(",
"path",
")",
":",
"path",
"=",
"encode_to_bin",
"(",
"path",
")",
"if",
"path",
"[",
"0",
"]",
"==",
"1",
":",
"path",
"=",
"path",
"[",
"4",
":",
"]",
"assert",
"path",
"[",
"0",
":",
"2",
"]",
"==",
"PREFIX_00",
"padded_len",
"=",
"TWO_BITS",
".",
"index",
"(",
"path",
"[",
"2",
":",
"4",
"]",
")",
"return",
"path",
"[",
"4",
"+",
"(",
"(",
"4",
"-",
"padded_len",
")",
"%",
"4",
")",
":",
"]"
] |
Decodes bytes into a sequence of 0s and 1s
Used in decoding key path of a KV-NODE
|
[
"Decodes",
"bytes",
"into",
"a",
"sequence",
"of",
"0s",
"and",
"1s",
"Used",
"in",
"decoding",
"key",
"path",
"of",
"a",
"KV",
"-",
"NODE"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L56-L66
|
18,438
|
ethereum/py-trie
|
trie/utils/nibbles.py
|
encode_nibbles
|
def encode_nibbles(nibbles):
"""
The Hex Prefix function
"""
if is_nibbles_terminated(nibbles):
flag = HP_FLAG_2
else:
flag = HP_FLAG_0
raw_nibbles = remove_nibbles_terminator(nibbles)
is_odd = len(raw_nibbles) % 2
if is_odd:
flagged_nibbles = tuple(itertools.chain(
(flag + 1,),
raw_nibbles,
))
else:
flagged_nibbles = tuple(itertools.chain(
(flag, 0),
raw_nibbles,
))
prefixed_value = nibbles_to_bytes(flagged_nibbles)
return prefixed_value
|
python
|
def encode_nibbles(nibbles):
"""
The Hex Prefix function
"""
if is_nibbles_terminated(nibbles):
flag = HP_FLAG_2
else:
flag = HP_FLAG_0
raw_nibbles = remove_nibbles_terminator(nibbles)
is_odd = len(raw_nibbles) % 2
if is_odd:
flagged_nibbles = tuple(itertools.chain(
(flag + 1,),
raw_nibbles,
))
else:
flagged_nibbles = tuple(itertools.chain(
(flag, 0),
raw_nibbles,
))
prefixed_value = nibbles_to_bytes(flagged_nibbles)
return prefixed_value
|
[
"def",
"encode_nibbles",
"(",
"nibbles",
")",
":",
"if",
"is_nibbles_terminated",
"(",
"nibbles",
")",
":",
"flag",
"=",
"HP_FLAG_2",
"else",
":",
"flag",
"=",
"HP_FLAG_0",
"raw_nibbles",
"=",
"remove_nibbles_terminator",
"(",
"nibbles",
")",
"is_odd",
"=",
"len",
"(",
"raw_nibbles",
")",
"%",
"2",
"if",
"is_odd",
":",
"flagged_nibbles",
"=",
"tuple",
"(",
"itertools",
".",
"chain",
"(",
"(",
"flag",
"+",
"1",
",",
")",
",",
"raw_nibbles",
",",
")",
")",
"else",
":",
"flagged_nibbles",
"=",
"tuple",
"(",
"itertools",
".",
"chain",
"(",
"(",
"flag",
",",
"0",
")",
",",
"raw_nibbles",
",",
")",
")",
"prefixed_value",
"=",
"nibbles_to_bytes",
"(",
"flagged_nibbles",
")",
"return",
"prefixed_value"
] |
The Hex Prefix function
|
[
"The",
"Hex",
"Prefix",
"function"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nibbles.py#L78-L104
|
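`encode_nibbles` implements Ethereum's hex-prefix encoding: a flag nibble records the path's parity and whether it is terminated, then the nibbles are packed two per byte. A minimal sketch with the terminator handling reduced to a boolean argument (the real function derives it from a terminator sentinel in the nibble sequence):

```python
def hp_encode(nibbles, terminated):
    # Flag nibble: 0 = even/extension, 1 = odd/extension,
    #              2 = even/leaf (terminated), 3 = odd/leaf.
    flag = (2 if terminated else 0) + (len(nibbles) % 2)
    if len(nibbles) % 2:
        prefixed = (flag,) + tuple(nibbles)
    else:
        prefixed = (flag, 0) + tuple(nibbles)
    return bytes(prefixed[i] * 16 + prefixed[i + 1] for i in range(0, len(prefixed), 2))

assert hp_encode((1, 2, 3), terminated=False) == bytes([0x11, 0x23])
assert hp_encode((0, 1, 2, 3), terminated=True) == bytes([0x20, 0x01, 0x23])
```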
18,439
|
ethereum/py-trie
|
trie/utils/nibbles.py
|
decode_nibbles
|
def decode_nibbles(value):
"""
The inverse of the Hex Prefix function
"""
nibbles_with_flag = bytes_to_nibbles(value)
flag = nibbles_with_flag[0]
needs_terminator = flag in {HP_FLAG_2, HP_FLAG_2 + 1}
is_odd_length = flag in {HP_FLAG_0 + 1, HP_FLAG_2 + 1}
if is_odd_length:
raw_nibbles = nibbles_with_flag[1:]
else:
raw_nibbles = nibbles_with_flag[2:]
if needs_terminator:
nibbles = add_nibbles_terminator(raw_nibbles)
else:
nibbles = raw_nibbles
return nibbles
|
python
|
def decode_nibbles(value):
"""
The inverse of the Hex Prefix function
"""
nibbles_with_flag = bytes_to_nibbles(value)
flag = nibbles_with_flag[0]
needs_terminator = flag in {HP_FLAG_2, HP_FLAG_2 + 1}
is_odd_length = flag in {HP_FLAG_0 + 1, HP_FLAG_2 + 1}
if is_odd_length:
raw_nibbles = nibbles_with_flag[1:]
else:
raw_nibbles = nibbles_with_flag[2:]
if needs_terminator:
nibbles = add_nibbles_terminator(raw_nibbles)
else:
nibbles = raw_nibbles
return nibbles
|
[
"def",
"decode_nibbles",
"(",
"value",
")",
":",
"nibbles_with_flag",
"=",
"bytes_to_nibbles",
"(",
"value",
")",
"flag",
"=",
"nibbles_with_flag",
"[",
"0",
"]",
"needs_terminator",
"=",
"flag",
"in",
"{",
"HP_FLAG_2",
",",
"HP_FLAG_2",
"+",
"1",
"}",
"is_odd_length",
"=",
"flag",
"in",
"{",
"HP_FLAG_0",
"+",
"1",
",",
"HP_FLAG_2",
"+",
"1",
"}",
"if",
"is_odd_length",
":",
"raw_nibbles",
"=",
"nibbles_with_flag",
"[",
"1",
":",
"]",
"else",
":",
"raw_nibbles",
"=",
"nibbles_with_flag",
"[",
"2",
":",
"]",
"if",
"needs_terminator",
":",
"nibbles",
"=",
"add_nibbles_terminator",
"(",
"raw_nibbles",
")",
"else",
":",
"nibbles",
"=",
"raw_nibbles",
"return",
"nibbles"
] |
The inverse of the Hex Prefix function
|
[
"The",
"inverse",
"of",
"the",
"Hex",
"Prefix",
"function"
] |
d33108d21b54d59ee311f61d978496c84a6f1f8b
|
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nibbles.py#L107-L127
|
18,440
|
neon-jungle/wagtailvideos
|
wagtailvideos/models.py
|
get_local_file
|
def get_local_file(file):
"""
Get a local version of the file, downloading it from the remote storage if
required. The returned value should be used as a context manager to
ensure any temporary files are cleaned up afterwards.
"""
try:
with open(file.path):
yield file.path
except NotImplementedError:
_, ext = os.path.splitext(file.name)
with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp:
try:
file.open('rb')
for chunk in file.chunks():
tmp.write(chunk)
finally:
file.close()
tmp.flush()
yield tmp.name
|
python
|
def get_local_file(file):
"""
Get a local version of the file, downloading it from the remote storage if
required. The returned value should be used as a context manager to
ensure any temporary files are cleaned up afterwards.
"""
try:
with open(file.path):
yield file.path
except NotImplementedError:
_, ext = os.path.splitext(file.name)
with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp:
try:
file.open('rb')
for chunk in file.chunks():
tmp.write(chunk)
finally:
file.close()
tmp.flush()
yield tmp.name
|
[
"def",
"get_local_file",
"(",
"file",
")",
":",
"try",
":",
"with",
"open",
"(",
"file",
".",
"path",
")",
":",
"yield",
"file",
".",
"path",
"except",
"NotImplementedError",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file",
".",
"name",
")",
"with",
"NamedTemporaryFile",
"(",
"prefix",
"=",
"'wagtailvideo-'",
",",
"suffix",
"=",
"ext",
")",
"as",
"tmp",
":",
"try",
":",
"file",
".",
"open",
"(",
"'rb'",
")",
"for",
"chunk",
"in",
"file",
".",
"chunks",
"(",
")",
":",
"tmp",
".",
"write",
"(",
"chunk",
")",
"finally",
":",
"file",
".",
"close",
"(",
")",
"tmp",
".",
"flush",
"(",
")",
"yield",
"tmp",
".",
"name"
] |
Get a local version of the file, downloading it from the remote storage if
required. The returned value should be used as a context manager to
ensure any temporary files are cleaned up afterwards.
|
[
"Get",
"a",
"local",
"version",
"of",
"the",
"file",
"downloading",
"it",
"from",
"the",
"remote",
"storage",
"if",
"required",
".",
"The",
"returned",
"value",
"should",
"be",
"used",
"as",
"a",
"context",
"manager",
"to",
"ensure",
"any",
"temporary",
"files",
"are",
"cleaned",
"up",
"afterwards",
"."
] |
05a43571ac4b5e7cf07fbb89e804e53447b699c2
|
https://github.com/neon-jungle/wagtailvideos/blob/05a43571ac4b5e7cf07fbb89e804e53447b699c2/wagtailvideos/models.py#L292-L311
|
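`get_local_file` reads like a `contextlib.contextmanager` generator (the decorator is not visible here): callers always get a real filesystem path, whether or not the storage backend implements `.path`. A standalone sketch of the same fallback pattern, using a plain file-like object instead of a Django `FieldFile`:

```python
import io
import os
from contextlib import contextmanager
from tempfile import NamedTemporaryFile

@contextmanager
def local_copy(fileobj, name='upload.mp4'):
    # Prefer an existing on-disk path; otherwise stream into a temp file.
    path = getattr(fileobj, 'path', None)
    if path and os.path.exists(path):
        yield path
        return
    _, ext = os.path.splitext(name)
    with NamedTemporaryFile(prefix='wagtailvideo-', suffix=ext) as tmp:
        for chunk in iter(lambda: fileobj.read(64 * 1024), b''):
            tmp.write(chunk)
        tmp.flush()
        yield tmp.name

with local_copy(io.BytesIO(b'fake video bytes')) as local_path:
    assert os.path.exists(local_path)
```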
18,441
|
getsentry/semaphore
|
py/semaphore/utils.py
|
rustcall
|
def rustcall(func, *args):
"""Calls rust method and does some error handling."""
lib.semaphore_err_clear()
rv = func(*args)
err = lib.semaphore_err_get_last_code()
if not err:
return rv
msg = lib.semaphore_err_get_last_message()
cls = exceptions_by_code.get(err, SemaphoreError)
exc = cls(decode_str(msg))
backtrace = decode_str(lib.semaphore_err_get_backtrace())
if backtrace:
exc.rust_info = backtrace
raise exc
|
python
|
def rustcall(func, *args):
"""Calls rust method and does some error handling."""
lib.semaphore_err_clear()
rv = func(*args)
err = lib.semaphore_err_get_last_code()
if not err:
return rv
msg = lib.semaphore_err_get_last_message()
cls = exceptions_by_code.get(err, SemaphoreError)
exc = cls(decode_str(msg))
backtrace = decode_str(lib.semaphore_err_get_backtrace())
if backtrace:
exc.rust_info = backtrace
raise exc
|
[
"def",
"rustcall",
"(",
"func",
",",
"*",
"args",
")",
":",
"lib",
".",
"semaphore_err_clear",
"(",
")",
"rv",
"=",
"func",
"(",
"*",
"args",
")",
"err",
"=",
"lib",
".",
"semaphore_err_get_last_code",
"(",
")",
"if",
"not",
"err",
":",
"return",
"rv",
"msg",
"=",
"lib",
".",
"semaphore_err_get_last_message",
"(",
")",
"cls",
"=",
"exceptions_by_code",
".",
"get",
"(",
"err",
",",
"SemaphoreError",
")",
"exc",
"=",
"cls",
"(",
"decode_str",
"(",
"msg",
")",
")",
"backtrace",
"=",
"decode_str",
"(",
"lib",
".",
"semaphore_err_get_backtrace",
"(",
")",
")",
"if",
"backtrace",
":",
"exc",
".",
"rust_info",
"=",
"backtrace",
"raise",
"exc"
] |
Calls rust method and does some error handling.
|
[
"Calls",
"rust",
"method",
"and",
"does",
"some",
"error",
"handling",
"."
] |
6f260b4092261e893b4debd9a3a7a78232f46c5e
|
https://github.com/getsentry/semaphore/blob/6f260b4092261e893b4debd9a3a7a78232f46c5e/py/semaphore/utils.py#L17-L30
|
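`rustcall` wraps every FFI call in a clear/call/check sequence against error state held on the Rust side. A dependency-free sketch of the same shape, with a dict standing in for the `lib.semaphore_err_*` bindings (all names below are stand-ins, not the real API):

```python
_last_error = {'code': 0, 'message': ''}

class WrapperError(Exception):
    pass

def divide(a, b):
    # Stand-in for a wrapped native function that reports errors out of band.
    if b == 0:
        _last_error.update(code=1, message='division by zero')
        return None
    return a / b

def checked_call(func, *args):
    # Clear stale error state, run the call, then translate any reported
    # error code into a Python exception.
    _last_error.update(code=0, message='')
    rv = func(*args)
    if not _last_error['code']:
        return rv
    raise WrapperError(_last_error['message'])

assert checked_call(divide, 6, 3) == 2.0
```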
18,442
|
getsentry/semaphore
|
py/semaphore/utils.py
|
decode_str
|
def decode_str(s, free=False):
"""Decodes a SymbolicStr"""
try:
if s.len == 0:
return u""
return ffi.unpack(s.data, s.len).decode("utf-8", "replace")
finally:
if free:
lib.semaphore_str_free(ffi.addressof(s))
|
python
|
def decode_str(s, free=False):
"""Decodes a SymbolicStr"""
try:
if s.len == 0:
return u""
return ffi.unpack(s.data, s.len).decode("utf-8", "replace")
finally:
if free:
lib.semaphore_str_free(ffi.addressof(s))
|
[
"def",
"decode_str",
"(",
"s",
",",
"free",
"=",
"False",
")",
":",
"try",
":",
"if",
"s",
".",
"len",
"==",
"0",
":",
"return",
"u\"\"",
"return",
"ffi",
".",
"unpack",
"(",
"s",
".",
"data",
",",
"s",
".",
"len",
")",
".",
"decode",
"(",
"\"utf-8\"",
",",
"\"replace\"",
")",
"finally",
":",
"if",
"free",
":",
"lib",
".",
"semaphore_str_free",
"(",
"ffi",
".",
"addressof",
"(",
"s",
")",
")"
] |
Decodes a SymbolicStr
|
[
"Decodes",
"a",
"SymbolicStr"
] |
6f260b4092261e893b4debd9a3a7a78232f46c5e
|
https://github.com/getsentry/semaphore/blob/6f260b4092261e893b4debd9a3a7a78232f46c5e/py/semaphore/utils.py#L69-L77
|
18,443
|
getsentry/semaphore
|
py/semaphore/utils.py
|
encode_str
|
def encode_str(s, mutable=False):
"""Encodes a SemaphoreStr"""
rv = ffi.new("SemaphoreStr *")
if isinstance(s, text_type):
s = s.encode("utf-8")
if mutable:
s = bytearray(s)
rv.data = ffi.from_buffer(s)
rv.len = len(s)
# we have to hold a weak reference here to ensure our string does not
# get collected before the string is used.
attached_refs[rv] = s
return rv
|
python
|
def encode_str(s, mutable=False):
"""Encodes a SemaphoreStr"""
rv = ffi.new("SemaphoreStr *")
if isinstance(s, text_type):
s = s.encode("utf-8")
if mutable:
s = bytearray(s)
rv.data = ffi.from_buffer(s)
rv.len = len(s)
# we have to hold a weak reference here to ensure our string does not
# get collected before the string is used.
attached_refs[rv] = s
return rv
|
[
"def",
"encode_str",
"(",
"s",
",",
"mutable",
"=",
"False",
")",
":",
"rv",
"=",
"ffi",
".",
"new",
"(",
"\"SemaphoreStr *\"",
")",
"if",
"isinstance",
"(",
"s",
",",
"text_type",
")",
":",
"s",
"=",
"s",
".",
"encode",
"(",
"\"utf-8\"",
")",
"if",
"mutable",
":",
"s",
"=",
"bytearray",
"(",
"s",
")",
"rv",
".",
"data",
"=",
"ffi",
".",
"from_buffer",
"(",
"s",
")",
"rv",
".",
"len",
"=",
"len",
"(",
"s",
")",
"# we have to hold a weak reference here to ensure our string does not",
"# get collected before the string is used.",
"attached_refs",
"[",
"rv",
"]",
"=",
"s",
"return",
"rv"
] |
Encodes a SemaphoreStr
|
[
"Encodes",
"a",
"SemaphoreStr"
] |
6f260b4092261e893b4debd9a3a7a78232f46c5e
|
https://github.com/getsentry/semaphore/blob/6f260b4092261e893b4debd9a3a7a78232f46c5e/py/semaphore/utils.py#L80-L92
|
18,444
|
getsentry/semaphore
|
py/semaphore/utils.py
|
decode_uuid
|
def decode_uuid(value):
"""Decodes the given uuid value."""
return uuid.UUID(bytes=bytes(bytearray(ffi.unpack(value.data, 16))))
|
python
|
def decode_uuid(value):
"""Decodes the given uuid value."""
return uuid.UUID(bytes=bytes(bytearray(ffi.unpack(value.data, 16))))
|
[
"def",
"decode_uuid",
"(",
"value",
")",
":",
"return",
"uuid",
".",
"UUID",
"(",
"bytes",
"=",
"bytes",
"(",
"bytearray",
"(",
"ffi",
".",
"unpack",
"(",
"value",
".",
"data",
",",
"16",
")",
")",
")",
")"
] |
Decodes the given uuid value.
|
[
"Decodes",
"the",
"given",
"uuid",
"value",
"."
] |
6f260b4092261e893b4debd9a3a7a78232f46c5e
|
https://github.com/getsentry/semaphore/blob/6f260b4092261e893b4debd9a3a7a78232f46c5e/py/semaphore/utils.py#L104-L106
|
18,445
|
getsentry/semaphore
|
scripts/git-precommit-hook.py
|
has_cargo_fmt
|
def has_cargo_fmt():
"""Runs a quick check to see if cargo fmt is installed."""
try:
c = subprocess.Popen(
["cargo", "fmt", "--", "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return c.wait() == 0
except OSError:
return False
|
python
|
def has_cargo_fmt():
"""Runs a quick check to see if cargo fmt is installed."""
try:
c = subprocess.Popen(
["cargo", "fmt", "--", "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return c.wait() == 0
except OSError:
return False
|
[
"def",
"has_cargo_fmt",
"(",
")",
":",
"try",
":",
"c",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"cargo\"",
",",
"\"fmt\"",
",",
"\"--\"",
",",
"\"--help\"",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
")",
"return",
"c",
".",
"wait",
"(",
")",
"==",
"0",
"except",
"OSError",
":",
"return",
"False"
] |
Runs a quick check to see if cargo fmt is installed.
|
[
"Runs",
"a",
"quick",
"check",
"to",
"see",
"if",
"cargo",
"fmt",
"is",
"installed",
"."
] |
6f260b4092261e893b4debd9a3a7a78232f46c5e
|
https://github.com/getsentry/semaphore/blob/6f260b4092261e893b4debd9a3a7a78232f46c5e/scripts/git-precommit-hook.py#L8-L18
|
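For reference, the same installed-check can be written with `subprocess.run` on Python 3.5+; a sketch, not the hook's actual code:

```python
import subprocess

def has_cargo_fmt():
    # Returns True when `cargo fmt -- --help` exits successfully.
    try:
        result = subprocess.run(
            ["cargo", "fmt", "--", "--help"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except OSError:
        return False
    return result.returncode == 0
```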
18,446
|
getsentry/semaphore
|
scripts/git-precommit-hook.py
|
get_modified_files
|
def get_modified_files():
"""Returns a list of all modified files."""
c = subprocess.Popen(
["git", "diff-index", "--cached", "--name-only", "HEAD"], stdout=subprocess.PIPE
)
return c.communicate()[0].splitlines()
|
python
|
def get_modified_files():
"""Returns a list of all modified files."""
c = subprocess.Popen(
["git", "diff-index", "--cached", "--name-only", "HEAD"], stdout=subprocess.PIPE
)
return c.communicate()[0].splitlines()
|
[
"def",
"get_modified_files",
"(",
")",
":",
"c",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"git\"",
",",
"\"diff-index\"",
",",
"\"--cached\"",
",",
"\"--name-only\"",
",",
"\"HEAD\"",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"return",
"c",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"splitlines",
"(",
")"
] |
Returns a list of all modified files.
|
[
"Returns",
"a",
"list",
"of",
"all",
"modified",
"files",
"."
] |
6f260b4092261e893b4debd9a3a7a78232f46c5e
|
https://github.com/getsentry/semaphore/blob/6f260b4092261e893b4debd9a3a7a78232f46c5e/scripts/git-precommit-hook.py#L21-L26
|
18,447
|
RobinNil/file_read_backwards
|
file_read_backwards/buffer_work_space.py
|
_get_next_chunk
|
def _get_next_chunk(fp, previously_read_position, chunk_size):
"""Return next chunk of data that we would from the file pointer.
Args:
fp: file-like object
previously_read_position: file pointer position that we have read from
chunk_size: desired read chunk_size
Returns:
(bytestring, int): data that has been read in, the file pointer position where the data has been read from
"""
seek_position, read_size = _get_what_to_read_next(fp, previously_read_position, chunk_size)
fp.seek(seek_position)
read_content = fp.read(read_size)
read_position = seek_position
return read_content, read_position
|
python
|
def _get_next_chunk(fp, previously_read_position, chunk_size):
"""Return next chunk of data that we would from the file pointer.
Args:
fp: file-like object
previously_read_position: file pointer position that we have read from
chunk_size: desired read chunk_size
Returns:
(bytestring, int): data that has been read in, the file pointer position where the data has been read from
"""
seek_position, read_size = _get_what_to_read_next(fp, previously_read_position, chunk_size)
fp.seek(seek_position)
read_content = fp.read(read_size)
read_position = seek_position
return read_content, read_position
|
[
"def",
"_get_next_chunk",
"(",
"fp",
",",
"previously_read_position",
",",
"chunk_size",
")",
":",
"seek_position",
",",
"read_size",
"=",
"_get_what_to_read_next",
"(",
"fp",
",",
"previously_read_position",
",",
"chunk_size",
")",
"fp",
".",
"seek",
"(",
"seek_position",
")",
"read_content",
"=",
"fp",
".",
"read",
"(",
"read_size",
")",
"read_position",
"=",
"seek_position",
"return",
"read_content",
",",
"read_position"
] |
Return next chunk of data that we would read from the file pointer.
Args:
fp: file-like object
previously_read_position: file pointer position that we have read from
chunk_size: desired read chunk_size
Returns:
(bytestring, int): data that has been read in, the file pointer position where the data has been read from
|
[
"Return",
"next",
"chunk",
"of",
"data",
"that",
"we",
"would",
"from",
"the",
"file",
"pointer",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L95-L110
|
18,448
|
RobinNil/file_read_backwards
|
file_read_backwards/buffer_work_space.py
|
_get_what_to_read_next
|
def _get_what_to_read_next(fp, previously_read_position, chunk_size):
"""Return information on which file pointer position to read from and how many bytes.
Args:
fp
past_read_position (int): The file pointer position that has been read previously
chunk_size(int): ideal io chunk_size
Returns:
(int, int): The next seek position, how many bytes to read next
"""
seek_position = max(previously_read_position - chunk_size, 0)
read_size = chunk_size
# examples: say, our new_lines are potentially "\r\n", "\n", "\r"
# find a reading point where it is not "\n", rewind further if necessary
# if we have "\r\n" and we read in "\n",
# the next iteration would treat "\r" as a different new line.
# Q: why don't I just check if it is b"\n", but use a function ?
# A: so that we can potentially expand this into generic sets of separators, later on.
while seek_position > 0:
fp.seek(seek_position)
if _is_partially_read_new_line(fp.read(1)):
seek_position -= 1
read_size += 1 # as we rewind further, let's make sure we read more to compensate
else:
break
# take care of special case when we are back to the beginning of the file
read_size = min(previously_read_position - seek_position, read_size)
return seek_position, read_size
|
python
|
def _get_what_to_read_next(fp, previously_read_position, chunk_size):
"""Return information on which file pointer position to read from and how many bytes.
Args:
fp
past_read_position (int): The file pointer position that has been read previously
chunk_size(int): ideal io chunk_size
Returns:
(int, int): The next seek position, how many bytes to read next
"""
seek_position = max(previously_read_position - chunk_size, 0)
read_size = chunk_size
# examples: say, our new_lines are potentially "\r\n", "\n", "\r"
# find a reading point where it is not "\n", rewind further if necessary
# if we have "\r\n" and we read in "\n",
# the next iteration would treat "\r" as a different new line.
# Q: why don't I just check if it is b"\n", but use a function ?
# A: so that we can potentially expand this into generic sets of separators, later on.
while seek_position > 0:
fp.seek(seek_position)
if _is_partially_read_new_line(fp.read(1)):
seek_position -= 1
read_size += 1 # as we rewind further, let's make sure we read more to compensate
else:
break
# take care of special case when we are back to the beginning of the file
read_size = min(previously_read_position - seek_position, read_size)
return seek_position, read_size
|
[
"def",
"_get_what_to_read_next",
"(",
"fp",
",",
"previously_read_position",
",",
"chunk_size",
")",
":",
"seek_position",
"=",
"max",
"(",
"previously_read_position",
"-",
"chunk_size",
",",
"0",
")",
"read_size",
"=",
"chunk_size",
"# examples: say, our new_lines are potentially \"\\r\\n\", \"\\n\", \"\\r\"",
"# find a reading point where it is not \"\\n\", rewind further if necessary",
"# if we have \"\\r\\n\" and we read in \"\\n\",",
"# the next iteration would treat \"\\r\" as a different new line.",
"# Q: why don't I just check if it is b\"\\n\", but use a function ?",
"# A: so that we can potentially expand this into generic sets of separators, later on.",
"while",
"seek_position",
">",
"0",
":",
"fp",
".",
"seek",
"(",
"seek_position",
")",
"if",
"_is_partially_read_new_line",
"(",
"fp",
".",
"read",
"(",
"1",
")",
")",
":",
"seek_position",
"-=",
"1",
"read_size",
"+=",
"1",
"# as we rewind further, let's make sure we read more to compensate",
"else",
":",
"break",
"# take care of special case when we are back to the beginnin of the file",
"read_size",
"=",
"min",
"(",
"previously_read_position",
"-",
"seek_position",
",",
"read_size",
")",
"return",
"seek_position",
",",
"read_size"
] |
Return information on which file pointer position to read from and how many bytes.
Args:
fp
past_read_position (int): The file pointer position that has been read previously
chunk_size(int): ideal io chunk_size
Returns:
(int, int): The next seek position, how many bytes to read next
|
[
"Return",
"information",
"on",
"which",
"file",
"pointer",
"position",
"to",
"read",
"from",
"and",
"how",
"many",
"bytes",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L113-L143
|
18,449
|
RobinNil/file_read_backwards
|
file_read_backwards/buffer_work_space.py
|
_remove_trailing_new_line
|
def _remove_trailing_new_line(l):
"""Remove a single instance of new line at the end of l if it exists.
Returns:
bytestring
"""
# replace only 1 instance of newline
# match longest line first (hence the reverse=True), we want to match "\r\n" rather than "\n" if we can
for n in sorted(new_lines_bytes, key=lambda x: len(x), reverse=True):
if l.endswith(n):
remove_new_line = slice(None, -len(n))
return l[remove_new_line]
return l
|
python
|
def _remove_trailing_new_line(l):
"""Remove a single instance of new line at the end of l if it exists.
Returns:
bytestring
"""
# replace only 1 instance of newline
# match longest line first (hence the reverse=True), we want to match "\r\n" rather than "\n" if we can
for n in sorted(new_lines_bytes, key=lambda x: len(x), reverse=True):
if l.endswith(n):
remove_new_line = slice(None, -len(n))
return l[remove_new_line]
return l
|
[
"def",
"_remove_trailing_new_line",
"(",
"l",
")",
":",
"# replace only 1 instance of newline",
"# match longest line first (hence the reverse=True), we want to match \"\\r\\n\" rather than \"\\n\" if we can",
"for",
"n",
"in",
"sorted",
"(",
"new_lines_bytes",
",",
"key",
"=",
"lambda",
"x",
":",
"len",
"(",
"x",
")",
",",
"reverse",
"=",
"True",
")",
":",
"if",
"l",
".",
"endswith",
"(",
"n",
")",
":",
"remove_new_line",
"=",
"slice",
"(",
"None",
",",
"-",
"len",
"(",
"n",
")",
")",
"return",
"l",
"[",
"remove_new_line",
"]",
"return",
"l"
] |
Remove a single instance of new line at the end of l if it exists.
Returns:
bytestring
|
[
"Remove",
"a",
"single",
"instance",
"of",
"new",
"line",
"at",
"the",
"end",
"of",
"l",
"if",
"it",
"exists",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L146-L158
|
18,450
|
RobinNil/file_read_backwards
|
file_read_backwards/buffer_work_space.py
|
_find_furthest_new_line
|
def _find_furthest_new_line(read_buffer):
"""Return -1 if read_buffer does not contain new line otherwise the position of the rightmost newline.
Args:
read_buffer (bytestring)
Returns:
int: The right most position of new line character in read_buffer if found, else -1
"""
new_line_positions = [read_buffer.rfind(n) for n in new_lines_bytes]
return max(new_line_positions)
|
python
|
def _find_furthest_new_line(read_buffer):
"""Return -1 if read_buffer does not contain new line otherwise the position of the rightmost newline.
Args:
read_buffer (bytestring)
Returns:
int: The right most position of new line character in read_buffer if found, else -1
"""
new_line_positions = [read_buffer.rfind(n) for n in new_lines_bytes]
return max(new_line_positions)
|
[
"def",
"_find_furthest_new_line",
"(",
"read_buffer",
")",
":",
"new_line_positions",
"=",
"[",
"read_buffer",
".",
"rfind",
"(",
"n",
")",
"for",
"n",
"in",
"new_lines_bytes",
"]",
"return",
"max",
"(",
"new_line_positions",
")"
] |
Return -1 if read_buffer does not contain new line otherwise the position of the rightmost newline.
Args:
read_buffer (bytestring)
Returns:
int: The right most position of new line character in read_buffer if found, else -1
|
[
"Return",
"-",
"1",
"if",
"read_buffer",
"does",
"not",
"contain",
"new",
"line",
"otherwise",
"the",
"position",
"of",
"the",
"rightmost",
"newline",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L161-L171
|
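A worked example of the rightmost-newline search above, assuming `new_lines_bytes` is `[b"\r\n", b"\n", b"\r"]` as the module's comments suggest:

```python
new_lines_bytes = [b"\r\n", b"\n", b"\r"]

buf = b"alpha\r\nbeta\rgamma"
assert max(buf.rfind(n) for n in new_lines_bytes) == 11   # the b"\r" before b"gamma"
assert max(b"no newline here".rfind(n) for n in new_lines_bytes) == -1
```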
18,451
|
RobinNil/file_read_backwards
|
file_read_backwards/buffer_work_space.py
|
BufferWorkSpace.add_to_buffer
|
def add_to_buffer(self, content, read_position):
"""Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to buffer working BufferWorkSpac.
read_position (int): where in the file pointer the data was read from.
"""
self.read_position = read_position
if self.read_buffer is None:
self.read_buffer = content
else:
self.read_buffer = content + self.read_buffer
|
python
|
def add_to_buffer(self, content, read_position):
"""Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to buffer working BufferWorkSpac.
read_position (int): where in the file pointer the data was read from.
"""
self.read_position = read_position
if self.read_buffer is None:
self.read_buffer = content
else:
self.read_buffer = content + self.read_buffer
|
[
"def",
"add_to_buffer",
"(",
"self",
",",
"content",
",",
"read_position",
")",
":",
"self",
".",
"read_position",
"=",
"read_position",
"if",
"self",
".",
"read_buffer",
"is",
"None",
":",
"self",
".",
"read_buffer",
"=",
"content",
"else",
":",
"self",
".",
"read_buffer",
"=",
"content",
"+",
"self",
".",
"read_buffer"
] |
Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to buffer working BufferWorkSpac.
read_position (int): where in the file pointer the data was read from.
|
[
"Add",
"additional",
"bytes",
"content",
"as",
"read",
"from",
"the",
"read_position",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L29-L40
|
18,452
|
RobinNil/file_read_backwards
|
file_read_backwards/buffer_work_space.py
|
BufferWorkSpace.yieldable
|
def yieldable(self):
"""Return True if there is a line that the buffer can return, False otherwise."""
if self.read_buffer is None:
return False
t = _remove_trailing_new_line(self.read_buffer)
n = _find_furthest_new_line(t)
if n >= 0:
return True
# we have read in entire file and have some unprocessed lines
if self.read_position == 0 and self.read_buffer is not None:
return True
return False
|
python
|
def yieldable(self):
"""Return True if there is a line that the buffer can return, False otherwise."""
if self.read_buffer is None:
return False
t = _remove_trailing_new_line(self.read_buffer)
n = _find_furthest_new_line(t)
if n >= 0:
return True
# we have read in entire file and have some unprocessed lines
if self.read_position == 0 and self.read_buffer is not None:
return True
return False
|
[
"def",
"yieldable",
"(",
"self",
")",
":",
"if",
"self",
".",
"read_buffer",
"is",
"None",
":",
"return",
"False",
"t",
"=",
"_remove_trailing_new_line",
"(",
"self",
".",
"read_buffer",
")",
"n",
"=",
"_find_furthest_new_line",
"(",
"t",
")",
"if",
"n",
">=",
"0",
":",
"return",
"True",
"# we have read in entire file and have some unprocessed lines",
"if",
"self",
".",
"read_position",
"==",
"0",
"and",
"self",
".",
"read_buffer",
"is",
"not",
"None",
":",
"return",
"True",
"return",
"False"
] |
Return True if there is a line that the buffer can return, False otherwise.
|
[
"Return",
"True",
"if",
"there",
"is",
"a",
"line",
"that",
"the",
"buffer",
"can",
"return",
"False",
"otherwise",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L42-L55
|
18,453
|
RobinNil/file_read_backwards
|
file_read_backwards/buffer_work_space.py
|
BufferWorkSpace.return_line
|
def return_line(self):
"""Return a new line if it is available.
Precondition: self.yieldable() must be True
"""
assert(self.yieldable())
t = _remove_trailing_new_line(self.read_buffer)
i = _find_furthest_new_line(t)
if i >= 0:
l = i + 1
after_new_line = slice(l, None)
up_to_include_new_line = slice(0, l)
r = t[after_new_line]
self.read_buffer = t[up_to_include_new_line]
else: # the case where we have read in entire file and at the "last" line
r = t
self.read_buffer = None
return r
|
python
|
def return_line(self):
"""Return a new line if it is available.
Precondition: self.yieldable() must be True
"""
assert(self.yieldable())
t = _remove_trailing_new_line(self.read_buffer)
i = _find_furthest_new_line(t)
if i >= 0:
l = i + 1
after_new_line = slice(l, None)
up_to_include_new_line = slice(0, l)
r = t[after_new_line]
self.read_buffer = t[up_to_include_new_line]
else: # the case where we have read in entire file and at the "last" line
r = t
self.read_buffer = None
return r
|
[
"def",
"return_line",
"(",
"self",
")",
":",
"assert",
"(",
"self",
".",
"yieldable",
"(",
")",
")",
"t",
"=",
"_remove_trailing_new_line",
"(",
"self",
".",
"read_buffer",
")",
"i",
"=",
"_find_furthest_new_line",
"(",
"t",
")",
"if",
"i",
">=",
"0",
":",
"l",
"=",
"i",
"+",
"1",
"after_new_line",
"=",
"slice",
"(",
"l",
",",
"None",
")",
"up_to_include_new_line",
"=",
"slice",
"(",
"0",
",",
"l",
")",
"r",
"=",
"t",
"[",
"after_new_line",
"]",
"self",
".",
"read_buffer",
"=",
"t",
"[",
"up_to_include_new_line",
"]",
"else",
":",
"# the case where we have read in entire file and at the \"last\" line",
"r",
"=",
"t",
"self",
".",
"read_buffer",
"=",
"None",
"return",
"r"
] |
Return a new line if it is available.
Precondition: self.yieldable() must be True
|
[
"Return",
"a",
"new",
"line",
"if",
"it",
"is",
"available",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L57-L76
|
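A worked example of the split `return_line` performs: the text after the rightmost newline is returned, and everything up to and including that newline stays buffered for the next call:

```python
buf = b"first\nsecond\nthird"           # trailing newline already stripped
i = buf.rfind(b"\n")                     # position of the furthest newline
line, remaining = buf[i + 1:], buf[:i + 1]
assert line == b"third"
assert remaining == b"first\nsecond\n"
```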
18,454
|
RobinNil/file_read_backwards
|
file_read_backwards/buffer_work_space.py
|
BufferWorkSpace.read_until_yieldable
|
def read_until_yieldable(self):
"""Read in additional chunks until it is yieldable."""
while not self.yieldable():
read_content, read_position = _get_next_chunk(self.fp, self.read_position, self.chunk_size)
self.add_to_buffer(read_content, read_position)
|
python
|
def read_until_yieldable(self):
"""Read in additional chunks until it is yieldable."""
while not self.yieldable():
read_content, read_position = _get_next_chunk(self.fp, self.read_position, self.chunk_size)
self.add_to_buffer(read_content, read_position)
|
[
"def",
"read_until_yieldable",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"yieldable",
"(",
")",
":",
"read_content",
",",
"read_position",
"=",
"_get_next_chunk",
"(",
"self",
".",
"fp",
",",
"self",
".",
"read_position",
",",
"self",
".",
"chunk_size",
")",
"self",
".",
"add_to_buffer",
"(",
"read_content",
",",
"read_position",
")"
] |
Read in additional chunks until it is yieldable.
|
[
"Read",
"in",
"additional",
"chunks",
"until",
"it",
"is",
"yieldable",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/buffer_work_space.py#L78-L82
|
18,455
|
RobinNil/file_read_backwards
|
file_read_backwards/file_read_backwards.py
|
FileReadBackwardsIterator.next
|
def next(self):
"""Returns unicode string from the last line until the beginning of file.
Gets exhausted if::
* already reached the beginning of the file on previous iteration
* the file got closed
When it gets exhausted, it closes the file handler.
"""
# Using binary mode, because some encodings such as "utf-8" use variable number of
# bytes to encode different Unicode points.
# Without using binary mode, we would probably need to understand each encoding more
# and do the seek operations to find the proper boundary before issuing read
if self.closed:
raise StopIteration
if self.__buf.has_returned_every_line():
self.close()
raise StopIteration
self.__buf.read_until_yieldable()
r = self.__buf.return_line()
return r.decode(self.encoding)
|
python
|
def next(self):
"""Returns unicode string from the last line until the beginning of file.
Gets exhausted if::
* already reached the beginning of the file on previous iteration
* the file got closed
When it gets exhausted, it closes the file handler.
"""
# Using binary mode, because some encodings such as "utf-8" use variable number of
# bytes to encode different Unicode points.
# Without using binary mode, we would probably need to understand each encoding more
# and do the seek operations to find the proper boundary before issuing read
if self.closed:
raise StopIteration
if self.__buf.has_returned_every_line():
self.close()
raise StopIteration
self.__buf.read_until_yieldable()
r = self.__buf.return_line()
return r.decode(self.encoding)
|
[
"def",
"next",
"(",
"self",
")",
":",
"# Using binary mode, because some encodings such as \"utf-8\" use variable number of",
"# bytes to encode different Unicode points.",
"# Without using binary mode, we would probably need to understand each encoding more",
"# and do the seek operations to find the proper boundary before issuing read",
"if",
"self",
".",
"closed",
":",
"raise",
"StopIteration",
"if",
"self",
".",
"__buf",
".",
"has_returned_every_line",
"(",
")",
":",
"self",
".",
"close",
"(",
")",
"raise",
"StopIteration",
"self",
".",
"__buf",
".",
"read_until_yieldable",
"(",
")",
"r",
"=",
"self",
".",
"__buf",
".",
"return_line",
"(",
")",
"return",
"r",
".",
"decode",
"(",
"self",
".",
"encoding",
")"
] |
Returns unicode string from the last line until the beginning of file.
Gets exhausted if::
* already reached the beginning of the file on previous iteration
* the file got closed
When it gets exhausted, it closes the file handler.
|
[
"Returns",
"unicode",
"string",
"from",
"the",
"last",
"line",
"until",
"the",
"beginning",
"of",
"file",
"."
] |
e56443095b58aae309fbc43a0943eba867dc8500
|
https://github.com/RobinNil/file_read_backwards/blob/e56443095b58aae309fbc43a0943eba867dc8500/file_read_backwards/file_read_backwards.py#L91-L112
|
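An assumed end-to-end use of the public `FileReadBackwards` class whose iterator is shown above; the constructor and `with` support follow the project's README, so treat the exact signature as an assumption:

```python
import os
import tempfile
from file_read_backwards import FileReadBackwards

path = os.path.join(tempfile.mkdtemp(), 'sample.log')
with open(path, 'w', encoding='utf-8') as f:
    f.write("first\nsecond\nthird\n")

with FileReadBackwards(path, encoding="utf-8") as frb:
    # lines come back last-first, with trailing newlines stripped
    assert list(frb) == ["third", "second", "first"]
```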
18,456
|
hendrix/hendrix
|
examples/django_hx_chatserver/example_app/chat/views.py
|
home
|
def home(request, chat_channel_name=None):
"""
if we have a chat_channel_name kwarg,
have the response include that channel name
so the javascript knows to subscribe to that
channel...
"""
if not chat_channel_name:
chat_channel_name = 'homepage'
context = {
'address': chat_channel_name,
'history': [],
}
if ChatMessage.objects.filter(channel=chat_channel_name).exists():
context['history'] = ChatMessage.objects.filter(
channel=chat_channel_name)
# TODO add https
websocket_prefix = "ws"
websocket_port = 9000
context['websocket_prefix'] = websocket_prefix
context['websocket_port'] = websocket_port
return render(request, 'chat.html', context)
|
python
|
def home(request, chat_channel_name=None):
"""
if we have a chat_channel_name kwarg,
have the response include that channel name
so the javascript knows to subscribe to that
channel...
"""
if not chat_channel_name:
chat_channel_name = 'homepage'
context = {
'address': chat_channel_name,
'history': [],
}
if ChatMessage.objects.filter(channel=chat_channel_name).exists():
context['history'] = ChatMessage.objects.filter(
channel=chat_channel_name)
# TODO add https
websocket_prefix = "ws"
websocket_port = 9000
context['websocket_prefix'] = websocket_prefix
context['websocket_port'] = websocket_port
return render(request, 'chat.html', context)
|
[
"def",
"home",
"(",
"request",
",",
"chat_channel_name",
"=",
"None",
")",
":",
"if",
"not",
"chat_channel_name",
":",
"chat_channel_name",
"=",
"'homepage'",
"context",
"=",
"{",
"'address'",
":",
"chat_channel_name",
",",
"'history'",
":",
"[",
"]",
",",
"}",
"if",
"ChatMessage",
".",
"objects",
".",
"filter",
"(",
"channel",
"=",
"chat_channel_name",
")",
".",
"exists",
"(",
")",
":",
"context",
"[",
"'history'",
"]",
"=",
"ChatMessage",
".",
"objects",
".",
"filter",
"(",
"channel",
"=",
"chat_channel_name",
")",
"# TODO add https ",
"websocket_prefix",
"=",
"\"ws\"",
"websocket_port",
"=",
"9000",
"context",
"[",
"'websocket_prefix'",
"]",
"=",
"websocket_prefix",
"context",
"[",
"'websocket_port'",
"]",
"=",
"websocket_port",
"return",
"render",
"(",
"request",
",",
"'chat.html'",
",",
"context",
")"
] |
if we have a chat_channel_name kwarg,
have the response include that channel name
so the javascript knows to subscribe to that
channel...
|
[
"if",
"we",
"have",
"a",
"chat_channel_name",
"kwarg",
"have",
"the",
"response",
"include",
"that",
"channel",
"name",
"so",
"the",
"javascript",
"knows",
"to",
"subscribe",
"to",
"that",
"channel",
"..."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/examples/django_hx_chatserver/example_app/chat/views.py#L9-L37
|
18,457
|
hendrix/hendrix
|
hendrix/ux.py
|
hendrixLauncher
|
def hendrixLauncher(action, options, with_tiempo=False):
"""
Decides which version of HendrixDeploy to use and then
launches it.
"""
if options['key'] and options['cert'] and options['cache']:
from hendrix.deploy import hybrid
HendrixDeploy = hybrid.HendrixDeployHybrid
elif options['key'] and options['cert']:
from hendrix.deploy import tls
HendrixDeploy = tls.HendrixDeployTLS
elif options['cache']:
HendrixDeploy = cache.HendrixDeployCache
else:
HendrixDeploy = base.HendrixDeploy
if with_tiempo:
deploy = HendrixDeploy(action='start', options=options)
deploy.run()
else:
deploy = HendrixDeploy(action, options)
deploy.run()
|
python
|
def hendrixLauncher(action, options, with_tiempo=False):
"""
Decides which version of HendrixDeploy to use and then
launches it.
"""
if options['key'] and options['cert'] and options['cache']:
from hendrix.deploy import hybrid
HendrixDeploy = hybrid.HendrixDeployHybrid
elif options['key'] and options['cert']:
from hendrix.deploy import tls
HendrixDeploy = tls.HendrixDeployTLS
elif options['cache']:
HendrixDeploy = cache.HendrixDeployCache
else:
HendrixDeploy = base.HendrixDeploy
if with_tiempo:
deploy = HendrixDeploy(action='start', options=options)
deploy.run()
else:
deploy = HendrixDeploy(action, options)
deploy.run()
|
[
"def",
"hendrixLauncher",
"(",
"action",
",",
"options",
",",
"with_tiempo",
"=",
"False",
")",
":",
"if",
"options",
"[",
"'key'",
"]",
"and",
"options",
"[",
"'cert'",
"]",
"and",
"options",
"[",
"'cache'",
"]",
":",
"from",
"hendrix",
".",
"deploy",
"import",
"hybrid",
"HendrixDeploy",
"=",
"hybrid",
".",
"HendrixDeployHybrid",
"elif",
"options",
"[",
"'key'",
"]",
"and",
"options",
"[",
"'cert'",
"]",
":",
"from",
"hendrix",
".",
"deploy",
"import",
"tls",
"HendrixDeploy",
"=",
"tls",
".",
"HendrixDeployTLS",
"elif",
"options",
"[",
"'cache'",
"]",
":",
"HendrixDeploy",
"=",
"cache",
".",
"HendrixDeployCache",
"else",
":",
"HendrixDeploy",
"=",
"base",
".",
"HendrixDeploy",
"if",
"with_tiempo",
":",
"deploy",
"=",
"HendrixDeploy",
"(",
"action",
"=",
"'start'",
",",
"options",
"=",
"options",
")",
"deploy",
".",
"run",
"(",
")",
"else",
":",
"deploy",
"=",
"HendrixDeploy",
"(",
"action",
",",
"options",
")",
"deploy",
".",
"run",
"(",
")"
] |
Decides which version of HendrixDeploy to use and then
launches it.
|
[
"Decides",
"which",
"version",
"of",
"HendrixDeploy",
"to",
"use",
"and",
"then",
"launches",
"it",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/ux.py#L67-L87
|
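A sketch of how the key/cert/cache entries of the options dict select a deploy class in hendrixLauncher above; the dict values are hypothetical and the function below only mirrors the branch logic rather than importing hendrix.

# Hypothetical option dicts exercising the if/elif chain in hendrixLauncher.
tls_options = {'key': 'priv.pem', 'cert': 'cert.pem', 'cache': False}
cache_options = {'key': None, 'cert': None, 'cache': True}
plain_options = {'key': None, 'cert': None, 'cache': False}

def pick_deploy_name(options):
    # hybrid when TLS and cache are both requested, then TLS-only,
    # then cache-only, otherwise the base deployment class.
    if options['key'] and options['cert'] and options['cache']:
        return 'HendrixDeployHybrid'
    elif options['key'] and options['cert']:
        return 'HendrixDeployTLS'
    elif options['cache']:
        return 'HendrixDeployCache'
    return 'HendrixDeploy'

for opts in (tls_options, cache_options, plain_options):
    print(pick_deploy_name(opts))   # HendrixDeployTLS, HendrixDeployCache, HendrixDeploy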
18,458
|
hendrix/hendrix
|
hendrix/ux.py
|
logReload
|
def logReload(options):
"""
encompasses all the logic for reloading observer.
"""
event_handler = Reload(options)
observer = Observer()
observer.schedule(event_handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
pid = os.getpid()
chalk.eraser()
chalk.green('\nHendrix successfully closed.')
os.kill(pid, 15)
observer.join()
exit('\n')
|
python
|
def logReload(options):
"""
encompasses all the logic for reloading observer.
"""
event_handler = Reload(options)
observer = Observer()
observer.schedule(event_handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
pid = os.getpid()
chalk.eraser()
chalk.green('\nHendrix successfully closed.')
os.kill(pid, 15)
observer.join()
exit('\n')
|
[
"def",
"logReload",
"(",
"options",
")",
":",
"event_handler",
"=",
"Reload",
"(",
"options",
")",
"observer",
"=",
"Observer",
"(",
")",
"observer",
".",
"schedule",
"(",
"event_handler",
",",
"path",
"=",
"'.'",
",",
"recursive",
"=",
"True",
")",
"observer",
".",
"start",
"(",
")",
"try",
":",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"except",
"KeyboardInterrupt",
":",
"observer",
".",
"stop",
"(",
")",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"chalk",
".",
"eraser",
"(",
")",
"chalk",
".",
"green",
"(",
"'\\nHendrix successfully closed.'",
")",
"os",
".",
"kill",
"(",
"pid",
",",
"15",
")",
"observer",
".",
"join",
"(",
")",
"exit",
"(",
"'\\n'",
")"
] |
encompasses all the logic for reloading observer.
|
[
"encompasses",
"all",
"the",
"logic",
"for",
"reloading",
"observer",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/ux.py#L100-L118
|
18,459
|
hendrix/hendrix
|
hendrix/ux.py
|
launch
|
def launch(*args, **options):
"""
launch acts on the user specified action and options by executing
    Hendrix.run
"""
action = args[0]
if options['reload']:
logReload(options)
else:
assignDeploymentInstance(action, options)
|
python
|
def launch(*args, **options):
"""
launch acts on the user specified action and options by executing
    Hendrix.run
"""
action = args[0]
if options['reload']:
logReload(options)
else:
assignDeploymentInstance(action, options)
|
[
"def",
"launch",
"(",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"action",
"=",
"args",
"[",
"0",
"]",
"if",
"options",
"[",
"'reload'",
"]",
":",
"logReload",
"(",
"options",
")",
"else",
":",
"assignDeploymentInstance",
"(",
"action",
",",
"options",
")"
] |
launch acts on the user specified action and options by executing
Hendrix.run
|
[
"launch",
"acts",
"on",
"the",
"user",
"specified",
"action",
"and",
"options",
"by",
"executing",
"Hedrix",
".",
"run"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/ux.py#L121-L130
|
18,460
|
hendrix/hendrix
|
hendrix/ux.py
|
findSettingsModule
|
def findSettingsModule():
"Find the settings module dot path within django's manage.py file"
try:
with open('manage.py', 'r') as manage:
manage_contents = manage.read()
search = re.search(
r"([\"\'](?P<module>[a-z\.]+)[\"\'])", manage_contents
)
if search: # django version < 1.7
settings_mod = search.group("module")
else:
# in 1.7, manage.py settings declaration looks like:
# os.environ.setdefault(
# "DJANGO_SETTINGS_MODULE", "example_app.settings"
# )
search = re.search(
"\".*?\"(,\\s)??\"(?P<module>.*?)\"\\)$",
manage_contents, re.I | re.S | re.M
)
settings_mod = search.group("module")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_mod)
except IOError as e:
msg = (
str(e) + '\nPlease ensure that you are in the same directory '
'as django\'s "manage.py" file.'
)
raise IOError(chalk.red(msg), None, sys.exc_info()[2])
except AttributeError:
settings_mod = ''
return settings_mod
|
python
|
def findSettingsModule():
"Find the settings module dot path within django's manage.py file"
try:
with open('manage.py', 'r') as manage:
manage_contents = manage.read()
search = re.search(
r"([\"\'](?P<module>[a-z\.]+)[\"\'])", manage_contents
)
if search: # django version < 1.7
settings_mod = search.group("module")
else:
# in 1.7, manage.py settings declaration looks like:
# os.environ.setdefault(
# "DJANGO_SETTINGS_MODULE", "example_app.settings"
# )
search = re.search(
"\".*?\"(,\\s)??\"(?P<module>.*?)\"\\)$",
manage_contents, re.I | re.S | re.M
)
settings_mod = search.group("module")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_mod)
except IOError as e:
msg = (
str(e) + '\nPlease ensure that you are in the same directory '
'as django\'s "manage.py" file.'
)
raise IOError(chalk.red(msg), None, sys.exc_info()[2])
except AttributeError:
settings_mod = ''
return settings_mod
|
[
"def",
"findSettingsModule",
"(",
")",
":",
"try",
":",
"with",
"open",
"(",
"'manage.py'",
",",
"'r'",
")",
"as",
"manage",
":",
"manage_contents",
"=",
"manage",
".",
"read",
"(",
")",
"search",
"=",
"re",
".",
"search",
"(",
"r\"([\\\"\\'](?P<module>[a-z\\.]+)[\\\"\\'])\"",
",",
"manage_contents",
")",
"if",
"search",
":",
"# django version < 1.7",
"settings_mod",
"=",
"search",
".",
"group",
"(",
"\"module\"",
")",
"else",
":",
"# in 1.7, manage.py settings declaration looks like:",
"# os.environ.setdefault(",
"# \"DJANGO_SETTINGS_MODULE\", \"example_app.settings\"",
"# )",
"search",
"=",
"re",
".",
"search",
"(",
"\"\\\".*?\\\"(,\\\\s)??\\\"(?P<module>.*?)\\\"\\\\)$\"",
",",
"manage_contents",
",",
"re",
".",
"I",
"|",
"re",
".",
"S",
"|",
"re",
".",
"M",
")",
"settings_mod",
"=",
"search",
".",
"group",
"(",
"\"module\"",
")",
"os",
".",
"environ",
".",
"setdefault",
"(",
"'DJANGO_SETTINGS_MODULE'",
",",
"settings_mod",
")",
"except",
"IOError",
"as",
"e",
":",
"msg",
"=",
"(",
"str",
"(",
"e",
")",
"+",
"'\\nPlease ensure that you are in the same directory '",
"'as django\\'s \"manage.py\" file.'",
")",
"raise",
"IOError",
"(",
"chalk",
".",
"red",
"(",
"msg",
")",
",",
"None",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"except",
"AttributeError",
":",
"settings_mod",
"=",
"''",
"return",
"settings_mod"
] |
Find the settings module dot path within django's manage.py file
|
[
"Find",
"the",
"settings",
"module",
"dot",
"path",
"within",
"django",
"s",
"manage",
".",
"py",
"file"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/ux.py#L133-L165
|
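The fallback pattern in findSettingsModule (used for Django 1.7-style manage.py files) can be exercised on its own; the manage.py fragment below is hypothetical.

import re

# Hypothetical single-line setdefault call as generated by django-admin startproject.
manage_contents = 'os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_app.settings")\n'

# Same regular expression and flags as the else-branch of findSettingsModule.
search = re.search(
    "\".*?\"(,\\s)??\"(?P<module>.*?)\"\\)$",
    manage_contents, re.I | re.S | re.M
)
print(search.group("module"))   # example_app.settings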
18,461
|
hendrix/hendrix
|
hendrix/ux.py
|
subprocessLaunch
|
def subprocessLaunch():
"""
This function is called by the hxw script.
It takes no arguments, and returns an instance of HendrixDeploy
"""
if not redis_available:
raise RedisException("can't launch this subprocess without tiempo/redis.")
try:
action = 'start'
options = REDIS.get('worker_args')
assignDeploymentInstance(action='start', options=options)
except Exception:
chalk.red('\n Encountered an unhandled exception while trying to %s hendrix.\n' % action, pipe=chalk.stderr)
raise
|
python
|
def subprocessLaunch():
"""
This function is called by the hxw script.
It takes no arguments, and returns an instance of HendrixDeploy
"""
if not redis_available:
raise RedisException("can't launch this subprocess without tiempo/redis.")
try:
action = 'start'
options = REDIS.get('worker_args')
assignDeploymentInstance(action='start', options=options)
except Exception:
chalk.red('\n Encountered an unhandled exception while trying to %s hendrix.\n' % action, pipe=chalk.stderr)
raise
|
[
"def",
"subprocessLaunch",
"(",
")",
":",
"if",
"not",
"redis_available",
":",
"raise",
"RedisException",
"(",
"\"can't launch this subprocess without tiempo/redis.\"",
")",
"try",
":",
"action",
"=",
"'start'",
"options",
"=",
"REDIS",
".",
"get",
"(",
"'worker_args'",
")",
"assignDeploymentInstance",
"(",
"action",
"=",
"'start'",
",",
"options",
"=",
"options",
")",
"except",
"Exception",
":",
"chalk",
".",
"red",
"(",
"'\\n Encountered an unhandled exception while trying to %s hendrix.\\n'",
"%",
"action",
",",
"pipe",
"=",
"chalk",
".",
"stderr",
")",
"raise"
] |
This function is called by the hxw script.
It takes no arguments, and returns an instance of HendrixDeploy
|
[
"This",
"function",
"is",
"called",
"by",
"the",
"hxw",
"script",
".",
"It",
"takes",
"no",
"arguments",
"and",
"returns",
"an",
"instance",
"of",
"HendrixDeploy"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/ux.py#L227-L240
|
18,462
|
hendrix/hendrix
|
hendrix/ux.py
|
main
|
def main(args=None):
"The function to execute when running hx"
if args is None:
args = sys.argv[1:]
options, args = HendrixOptionParser.parse_args(args)
options = vars(options)
try:
action = args[0]
except IndexError:
HendrixOptionParser.print_help()
return
exposeProject(options)
options = djangoVsWsgi(options)
options = devFriendly(options)
redirect = noiseControl(options)
try:
launch(*args, **options)
except Exception:
chalk.red('\n Encountered an unhandled exception while trying to %s hendrix.\n' % action, pipe=chalk.stderr)
raise
|
python
|
def main(args=None):
"The function to execute when running hx"
if args is None:
args = sys.argv[1:]
options, args = HendrixOptionParser.parse_args(args)
options = vars(options)
try:
action = args[0]
except IndexError:
HendrixOptionParser.print_help()
return
exposeProject(options)
options = djangoVsWsgi(options)
options = devFriendly(options)
redirect = noiseControl(options)
try:
launch(*args, **options)
except Exception:
chalk.red('\n Encountered an unhandled exception while trying to %s hendrix.\n' % action, pipe=chalk.stderr)
raise
|
[
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"options",
",",
"args",
"=",
"HendrixOptionParser",
".",
"parse_args",
"(",
"args",
")",
"options",
"=",
"vars",
"(",
"options",
")",
"try",
":",
"action",
"=",
"args",
"[",
"0",
"]",
"except",
"IndexError",
":",
"HendrixOptionParser",
".",
"print_help",
"(",
")",
"return",
"exposeProject",
"(",
"options",
")",
"options",
"=",
"djangoVsWsgi",
"(",
"options",
")",
"options",
"=",
"devFriendly",
"(",
"options",
")",
"redirect",
"=",
"noiseControl",
"(",
"options",
")",
"try",
":",
"launch",
"(",
"*",
"args",
",",
"*",
"*",
"options",
")",
"except",
"Exception",
":",
"chalk",
".",
"red",
"(",
"'\\n Encountered an unhandled exception while trying to %s hendrix.\\n'",
"%",
"action",
",",
"pipe",
"=",
"chalk",
".",
"stderr",
")",
"raise"
] |
The function to execute when running hx
|
[
"The",
"function",
"to",
"execute",
"when",
"running",
"hx"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/ux.py#L243-L268
|
18,463
|
hendrix/hendrix
|
hendrix/contrib/cache/resource.py
|
CacheClient.handleHeader
|
def handleHeader(self, key, value):
"extends handleHeader to save headers to a local response object"
key_lower = key.lower()
if key_lower == 'location':
value = self.modLocationPort(value)
self._response.headers[key_lower] = value
if key_lower != 'cache-control':
# This causes us to not pass on the 'cache-control' parameter
# to the browser
# TODO: we should have a means of giving the user the option to
# configure how they want to manage browser-side cache control
proxy.ProxyClient.handleHeader(self, key, value)
|
python
|
def handleHeader(self, key, value):
"extends handleHeader to save headers to a local response object"
key_lower = key.lower()
if key_lower == 'location':
value = self.modLocationPort(value)
self._response.headers[key_lower] = value
if key_lower != 'cache-control':
# This causes us to not pass on the 'cache-control' parameter
# to the browser
# TODO: we should have a means of giving the user the option to
# configure how they want to manage browser-side cache control
proxy.ProxyClient.handleHeader(self, key, value)
|
[
"def",
"handleHeader",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"key_lower",
"=",
"key",
".",
"lower",
"(",
")",
"if",
"key_lower",
"==",
"'location'",
":",
"value",
"=",
"self",
".",
"modLocationPort",
"(",
"value",
")",
"self",
".",
"_response",
".",
"headers",
"[",
"key_lower",
"]",
"=",
"value",
"if",
"key_lower",
"!=",
"'cache-control'",
":",
"# This causes us to not pass on the 'cache-control' parameter",
"# to the browser",
"# TODO: we should have a means of giving the user the option to",
"# configure how they want to manage browser-side cache control",
"proxy",
".",
"ProxyClient",
".",
"handleHeader",
"(",
"self",
",",
"key",
",",
"value",
")"
] |
extends handleHeader to save headers to a local response object
|
[
"extends",
"handleHeader",
"to",
"save",
"headers",
"to",
"a",
"local",
"response",
"object"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L38-L49
|
18,464
|
hendrix/hendrix
|
hendrix/contrib/cache/resource.py
|
CacheClient.handleStatus
|
def handleStatus(self, version, code, message):
"extends handleStatus to instantiate a local response object"
proxy.ProxyClient.handleStatus(self, version, code, message)
# client.Response is currently just a container for needed data
self._response = client.Response(version, code, message, {}, None)
|
python
|
def handleStatus(self, version, code, message):
"extends handleStatus to instantiate a local response object"
proxy.ProxyClient.handleStatus(self, version, code, message)
# client.Response is currently just a container for needed data
self._response = client.Response(version, code, message, {}, None)
|
[
"def",
"handleStatus",
"(",
"self",
",",
"version",
",",
"code",
",",
"message",
")",
":",
"proxy",
".",
"ProxyClient",
".",
"handleStatus",
"(",
"self",
",",
"version",
",",
"code",
",",
"message",
")",
"# client.Response is currently just a container for needed data",
"self",
".",
"_response",
"=",
"client",
".",
"Response",
"(",
"version",
",",
"code",
",",
"message",
",",
"{",
"}",
",",
"None",
")"
] |
extends handleStatus to instantiate a local response object
|
[
"extends",
"handleStatus",
"to",
"instantiate",
"a",
"local",
"response",
"object"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L51-L55
|
18,465
|
hendrix/hendrix
|
hendrix/contrib/cache/resource.py
|
CacheClient.modLocationPort
|
def modLocationPort(self, location):
"""
    Ensures that the location port is at the given port value
Used in `handleHeader`
"""
components = urlparse.urlparse(location)
reverse_proxy_port = self.father.getHost().port
reverse_proxy_host = self.father.getHost().host
# returns an ordered dict of urlparse.ParseResult components
_components = components._asdict()
_components['netloc'] = '%s:%d' % (
reverse_proxy_host, reverse_proxy_port
)
return urlparse.urlunparse(_components.values())
|
python
|
def modLocationPort(self, location):
"""
    Ensures that the location port is at the given port value
Used in `handleHeader`
"""
components = urlparse.urlparse(location)
reverse_proxy_port = self.father.getHost().port
reverse_proxy_host = self.father.getHost().host
# returns an ordered dict of urlparse.ParseResult components
_components = components._asdict()
_components['netloc'] = '%s:%d' % (
reverse_proxy_host, reverse_proxy_port
)
return urlparse.urlunparse(_components.values())
|
[
"def",
"modLocationPort",
"(",
"self",
",",
"location",
")",
":",
"components",
"=",
"urlparse",
".",
"urlparse",
"(",
"location",
")",
"reverse_proxy_port",
"=",
"self",
".",
"father",
".",
"getHost",
"(",
")",
".",
"port",
"reverse_proxy_host",
"=",
"self",
".",
"father",
".",
"getHost",
"(",
")",
".",
"host",
"# returns an ordered dict of urlparse.ParseResult components",
"_components",
"=",
"components",
".",
"_asdict",
"(",
")",
"_components",
"[",
"'netloc'",
"]",
"=",
"'%s:%d'",
"%",
"(",
"reverse_proxy_host",
",",
"reverse_proxy_port",
")",
"return",
"urlparse",
".",
"urlunparse",
"(",
"_components",
".",
"values",
"(",
")",
")"
] |
Ensures that the location port is at the given port value
Used in `handleHeader`
|
[
"Ensures",
"that",
"the",
"location",
"port",
"is",
"a",
"the",
"given",
"port",
"value",
"Used",
"in",
"handleHeader"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L57-L70
|
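The netloc rewrite done by modLocationPort can be reproduced with the standard library alone; this sketch uses Python 3's urllib.parse (the record targets the Python 2 urlparse module), and the Location value, host and port are hypothetical.

from urllib.parse import urlparse, urlunparse

def rewrite_location(location, proxy_host, proxy_port):
    # Same idea as modLocationPort: keep every URL component except netloc,
    # which is replaced by the reverse proxy's host:port.
    components = urlparse(location)._asdict()
    components['netloc'] = '%s:%d' % (proxy_host, proxy_port)
    return urlunparse(components.values())

# A redirect issued by the proxied app on port 8000 is rewritten for the proxy.
print(rewrite_location('http://127.0.0.1:8000/accounts/login/', 'localhost', 8080))
# -> http://localhost:8080/accounts/login/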
18,466
|
hendrix/hendrix
|
hendrix/contrib/cache/resource.py
|
CacheClient.handleResponsePart
|
def handleResponsePart(self, buffer):
"""
Sends the content to the browser and keeps a local copy of it.
    buffer is just a str of the content to be shown, father is the initial
request.
"""
self.father.write(buffer)
self.buffer.write(buffer)
|
python
|
def handleResponsePart(self, buffer):
"""
Sends the content to the browser and keeps a local copy of it.
    buffer is just a str of the content to be shown, father is the initial
request.
"""
self.father.write(buffer)
self.buffer.write(buffer)
|
[
"def",
"handleResponsePart",
"(",
"self",
",",
"buffer",
")",
":",
"self",
".",
"father",
".",
"write",
"(",
"buffer",
")",
"self",
".",
"buffer",
".",
"write",
"(",
"buffer",
")"
] |
Sends the content to the browser and keeps a local copy of it.
buffer is just a str of the content to be shown, father is the initial
request.
|
[
"Sends",
"the",
"content",
"to",
"the",
"browser",
"and",
"keeps",
"a",
"local",
"copy",
"of",
"it",
".",
"buffer",
"is",
"just",
"a",
"str",
"of",
"the",
"content",
"to",
"be",
"shown",
"father",
"is",
"the",
"intial",
"request",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L92-L99
|
18,467
|
hendrix/hendrix
|
hendrix/contrib/cache/resource.py
|
CacheProxyResource.getChild
|
def getChild(self, path, request):
"""
This is necessary because the parent class would call
proxy.ReverseProxyResource instead of CacheProxyResource
"""
return CacheProxyResource(
self.host, self.port, self.path + '/' + urlquote(path, safe=""),
self.reactor
)
|
python
|
def getChild(self, path, request):
"""
This is necessary because the parent class would call
proxy.ReverseProxyResource instead of CacheProxyResource
"""
return CacheProxyResource(
self.host, self.port, self.path + '/' + urlquote(path, safe=""),
self.reactor
)
|
[
"def",
"getChild",
"(",
"self",
",",
"path",
",",
"request",
")",
":",
"return",
"CacheProxyResource",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"self",
".",
"path",
"+",
"'/'",
"+",
"urlquote",
"(",
"path",
",",
"safe",
"=",
"\"\"",
")",
",",
"self",
".",
"reactor",
")"
] |
This is necessary because the parent class would call
proxy.ReverseProxyResource instead of CacheProxyResource
|
[
"This",
"is",
"necessary",
"because",
"the",
"parent",
"class",
"would",
"call",
"proxy",
".",
"ReverseProxyResource",
"instead",
"of",
"CacheProxyResource"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L148-L156
|
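getChild rebuilds the proxied path with urlquote(path, safe=""); the effect of the empty safe set is easiest to see with the standard library directly (urllib.parse.quote here, with a hypothetical path segment).

from urllib.parse import quote

# With safe="" even '/' inside the child segment is percent-encoded, so a
# single request segment cannot introduce extra path levels upstream.
print('/api' + '/' + quote('reports/2024', safe=''))   # /api/reports%2F2024
print('/api' + '/' + quote('reports/2024'))            # /api/reports/2024 (default keeps '/')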
18,468
|
hendrix/hendrix
|
hendrix/contrib/cache/resource.py
|
CacheProxyResource.getChildWithDefault
|
def getChildWithDefault(self, path, request):
"""
Retrieve a static or dynamically generated child resource from me.
"""
cached_resource = self.getCachedResource(request)
if cached_resource:
reactor.callInThread(
responseInColor,
request,
'200 OK',
cached_resource,
'Cached',
'underscore'
)
return cached_resource
# original logic
if path in self.children:
return self.children[path]
return self.getChild(path, request)
|
python
|
def getChildWithDefault(self, path, request):
"""
Retrieve a static or dynamically generated child resource from me.
"""
cached_resource = self.getCachedResource(request)
if cached_resource:
reactor.callInThread(
responseInColor,
request,
'200 OK',
cached_resource,
'Cached',
'underscore'
)
return cached_resource
# original logic
if path in self.children:
return self.children[path]
return self.getChild(path, request)
|
[
"def",
"getChildWithDefault",
"(",
"self",
",",
"path",
",",
"request",
")",
":",
"cached_resource",
"=",
"self",
".",
"getCachedResource",
"(",
"request",
")",
"if",
"cached_resource",
":",
"reactor",
".",
"callInThread",
"(",
"responseInColor",
",",
"request",
",",
"'200 OK'",
",",
"cached_resource",
",",
"'Cached'",
",",
"'underscore'",
")",
"return",
"cached_resource",
"# original logic",
"if",
"path",
"in",
"self",
".",
"children",
":",
"return",
"self",
".",
"children",
"[",
"path",
"]",
"return",
"self",
".",
"getChild",
"(",
"path",
",",
"request",
")"
] |
Retrieve a static or dynamically generated child resource from me.
|
[
"Retrieve",
"a",
"static",
"or",
"dynamically",
"generated",
"child",
"resource",
"from",
"me",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L158-L176
|
18,469
|
hendrix/hendrix
|
hendrix/contrib/cache/resource.py
|
CacheProxyResource.render
|
def render(self, request):
"""
Render a request by forwarding it to the proxied server.
"""
# set up and evaluate a connection to the target server
if self.port == 80:
host = self.host
else:
host = "%s:%d" % (self.host, self.port)
request.requestHeaders.addRawHeader('host', host)
request.content.seek(0, 0)
qs = urlparse.urlparse(request.uri)[4]
if qs:
rest = self.path + '?' + qs
else:
rest = self.path
global_self = self.getGlobalSelf()
clientFactory = self.proxyClientFactoryClass(
request.method, rest, request.clientproto,
request.getAllHeaders(), request.content.read(), request,
global_self # this is new
)
self.reactor.connectTCP(self.host, self.port, clientFactory)
return NOT_DONE_YET
|
python
|
def render(self, request):
"""
Render a request by forwarding it to the proxied server.
"""
# set up and evaluate a connection to the target server
if self.port == 80:
host = self.host
else:
host = "%s:%d" % (self.host, self.port)
request.requestHeaders.addRawHeader('host', host)
request.content.seek(0, 0)
qs = urlparse.urlparse(request.uri)[4]
if qs:
rest = self.path + '?' + qs
else:
rest = self.path
global_self = self.getGlobalSelf()
clientFactory = self.proxyClientFactoryClass(
request.method, rest, request.clientproto,
request.getAllHeaders(), request.content.read(), request,
global_self # this is new
)
self.reactor.connectTCP(self.host, self.port, clientFactory)
return NOT_DONE_YET
|
[
"def",
"render",
"(",
"self",
",",
"request",
")",
":",
"# set up and evaluate a connection to the target server",
"if",
"self",
".",
"port",
"==",
"80",
":",
"host",
"=",
"self",
".",
"host",
"else",
":",
"host",
"=",
"\"%s:%d\"",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"request",
".",
"requestHeaders",
".",
"addRawHeader",
"(",
"'host'",
",",
"host",
")",
"request",
".",
"content",
".",
"seek",
"(",
"0",
",",
"0",
")",
"qs",
"=",
"urlparse",
".",
"urlparse",
"(",
"request",
".",
"uri",
")",
"[",
"4",
"]",
"if",
"qs",
":",
"rest",
"=",
"self",
".",
"path",
"+",
"'?'",
"+",
"qs",
"else",
":",
"rest",
"=",
"self",
".",
"path",
"global_self",
"=",
"self",
".",
"getGlobalSelf",
"(",
")",
"clientFactory",
"=",
"self",
".",
"proxyClientFactoryClass",
"(",
"request",
".",
"method",
",",
"rest",
",",
"request",
".",
"clientproto",
",",
"request",
".",
"getAllHeaders",
"(",
")",
",",
"request",
".",
"content",
".",
"read",
"(",
")",
",",
"request",
",",
"global_self",
"# this is new",
")",
"self",
".",
"reactor",
".",
"connectTCP",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"clientFactory",
")",
"return",
"NOT_DONE_YET"
] |
Render a request by forwarding it to the proxied server.
|
[
"Render",
"a",
"request",
"by",
"forwarding",
"it",
"to",
"the",
"proxied",
"server",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L178-L204
|
18,470
|
hendrix/hendrix
|
hendrix/contrib/cache/resource.py
|
CacheProxyResource.getGlobalSelf
|
def getGlobalSelf(self):
"""
This searches the reactor for the original instance of
CacheProxyResource. This is necessary because with each call of
getChild a new instance of CacheProxyResource is created.
"""
transports = self.reactor.getReaders()
for transport in transports:
try:
resource = transport.factory.resource
if isinstance(resource, self.__class__) and resource.port == self.port:
return resource
except AttributeError:
pass
return
|
python
|
def getGlobalSelf(self):
"""
This searches the reactor for the original instance of
CacheProxyResource. This is necessary because with each call of
getChild a new instance of CacheProxyResource is created.
"""
transports = self.reactor.getReaders()
for transport in transports:
try:
resource = transport.factory.resource
if isinstance(resource, self.__class__) and resource.port == self.port:
return resource
except AttributeError:
pass
return
|
[
"def",
"getGlobalSelf",
"(",
"self",
")",
":",
"transports",
"=",
"self",
".",
"reactor",
".",
"getReaders",
"(",
")",
"for",
"transport",
"in",
"transports",
":",
"try",
":",
"resource",
"=",
"transport",
".",
"factory",
".",
"resource",
"if",
"isinstance",
"(",
"resource",
",",
"self",
".",
"__class__",
")",
"and",
"resource",
".",
"port",
"==",
"self",
".",
"port",
":",
"return",
"resource",
"except",
"AttributeError",
":",
"pass",
"return"
] |
This searches the reactor for the original instance of
CacheProxyResource. This is necessary because with each call of
getChild a new instance of CacheProxyResource is created.
|
[
"This",
"searches",
"the",
"reactor",
"for",
"the",
"original",
"instance",
"of",
"CacheProxyResource",
".",
"This",
"is",
"necessary",
"because",
"with",
"each",
"call",
"of",
"getChild",
"a",
"new",
"instance",
"of",
"CacheProxyResource",
"is",
"created",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/resource.py#L209-L223
|
18,471
|
hendrix/hendrix
|
hendrix/contrib/concurrency/resources.py
|
MessageHandlerProtocol.dataReceived
|
def dataReceived(self, data):
"""
Takes "data" which we assume is json encoded
If data has a subject_id attribute, we pass that to the dispatcher
as the subject_id so it will get carried through into any
return communications and be identifiable to the client
falls back to just passing the message along...
"""
try:
address = self.guid
data = json.loads(data)
threads.deferToThread(send_signal, self.dispatcher, data)
if 'hx_subscribe' in data:
return self.dispatcher.subscribe(self.transport, data)
if 'address' in data:
address = data['address']
else:
address = self.guid
self.dispatcher.send(address, data)
except Exception as e:
raise
self.dispatcher.send(
self.guid,
{'message': data, 'error': str(e)}
)
|
python
|
def dataReceived(self, data):
"""
Takes "data" which we assume is json encoded
If data has a subject_id attribute, we pass that to the dispatcher
as the subject_id so it will get carried through into any
return communications and be identifiable to the client
falls back to just passing the message along...
"""
try:
address = self.guid
data = json.loads(data)
threads.deferToThread(send_signal, self.dispatcher, data)
if 'hx_subscribe' in data:
return self.dispatcher.subscribe(self.transport, data)
if 'address' in data:
address = data['address']
else:
address = self.guid
self.dispatcher.send(address, data)
except Exception as e:
raise
self.dispatcher.send(
self.guid,
{'message': data, 'error': str(e)}
)
|
[
"def",
"dataReceived",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"address",
"=",
"self",
".",
"guid",
"data",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"threads",
".",
"deferToThread",
"(",
"send_signal",
",",
"self",
".",
"dispatcher",
",",
"data",
")",
"if",
"'hx_subscribe'",
"in",
"data",
":",
"return",
"self",
".",
"dispatcher",
".",
"subscribe",
"(",
"self",
".",
"transport",
",",
"data",
")",
"if",
"'address'",
"in",
"data",
":",
"address",
"=",
"data",
"[",
"'address'",
"]",
"else",
":",
"address",
"=",
"self",
".",
"guid",
"self",
".",
"dispatcher",
".",
"send",
"(",
"address",
",",
"data",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"self",
".",
"dispatcher",
".",
"send",
"(",
"self",
".",
"guid",
",",
"{",
"'message'",
":",
"data",
",",
"'error'",
":",
"str",
"(",
"e",
")",
"}",
")"
] |
Takes "data" which we assume is json encoded
If data has a subject_id attribute, we pass that to the dispatcher
as the subject_id so it will get carried through into any
return communications and be identifiable to the client
falls back to just passing the message along...
|
[
"Takes",
"data",
"which",
"we",
"assume",
"is",
"json",
"encoded",
"If",
"data",
"has",
"a",
"subject_id",
"attribute",
"we",
"pass",
"that",
"to",
"the",
"dispatcher",
"as",
"the",
"subject_id",
"so",
"it",
"will",
"get",
"carried",
"through",
"into",
"any",
"return",
"communications",
"and",
"be",
"identifiable",
"to",
"the",
"client"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/resources.py#L26-L57
|
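A sketch of the JSON payload shapes dataReceived distinguishes, based only on the 'hx_subscribe' and 'address' keys checked in the record; the channel, guid and message values are hypothetical.

import json

subscribe_msg = json.dumps({'hx_subscribe': 'chat_channel'})               # routed to dispatcher.subscribe
directed_msg = json.dumps({'address': 'recipient-guid', 'message': 'hi'})  # sent to the named address
plain_msg = json.dumps({'message': 'hi'})                                  # falls back to the sender's guid

for raw in (subscribe_msg, directed_msg, plain_msg):
    data = json.loads(raw)
    if 'hx_subscribe' in data:
        print('subscribe to', data['hx_subscribe'])
    else:
        print('send to', data.get('address', '<sender guid>'))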
18,472
|
hendrix/hendrix
|
hendrix/contrib/concurrency/resources.py
|
MessageHandlerProtocol.connectionMade
|
def connectionMade(self):
"""
establish the address of this new connection and add it to the list of
sockets managed by the dispatcher
reply to the transport with a "setup_connection" notice
containing the recipient's address for use by the client as a return
address for future communications
"""
self.transport.uid = str(uuid.uuid1())
self.guid = self.dispatcher.add(self.transport)
self.dispatcher.send(self.guid, {'setup_connection': self.guid})
|
python
|
def connectionMade(self):
"""
establish the address of this new connection and add it to the list of
sockets managed by the dispatcher
reply to the transport with a "setup_connection" notice
containing the recipient's address for use by the client as a return
address for future communications
"""
self.transport.uid = str(uuid.uuid1())
self.guid = self.dispatcher.add(self.transport)
self.dispatcher.send(self.guid, {'setup_connection': self.guid})
|
[
"def",
"connectionMade",
"(",
"self",
")",
":",
"self",
".",
"transport",
".",
"uid",
"=",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
"self",
".",
"guid",
"=",
"self",
".",
"dispatcher",
".",
"add",
"(",
"self",
".",
"transport",
")",
"self",
".",
"dispatcher",
".",
"send",
"(",
"self",
".",
"guid",
",",
"{",
"'setup_connection'",
":",
"self",
".",
"guid",
"}",
")"
] |
establish the address of this new connection and add it to the list of
sockets managed by the dispatcher
reply to the transport with a "setup_connection" notice
containing the recipient's address for use by the client as a return
address for future communications
|
[
"establish",
"the",
"address",
"of",
"this",
"new",
"connection",
"and",
"add",
"it",
"to",
"the",
"list",
"of",
"sockets",
"managed",
"by",
"the",
"dispatcher"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/resources.py#L59-L71
|
18,473
|
hendrix/hendrix
|
hendrix/utils/conf.py
|
generateInitd
|
def generateInitd(conf_file):
"""
Helper function to generate the text content needed to create an init.d
executable
"""
allowed_opts = [
'virtualenv', 'project_path', 'settings', 'processes',
'http_port', 'cache', 'cache_port', 'https_port', 'key', 'cert'
]
base_opts = ['--daemonize', ] # always daemonize
options = base_opts
with open(conf_file, 'r') as cfg:
conf = yaml.load(cfg)
conf_specs = set(conf.keys())
if len(conf_specs - set(allowed_opts)):
raise RuntimeError('Improperly configured.')
try:
virtualenv = conf.pop('virtualenv')
project_path = conf.pop('project_path')
except:
raise RuntimeError('Improperly configured.')
cache = False
if 'cache' in conf:
cache = conf.pop('cache')
if not cache:
options.append('--nocache')
workers = 0
if 'processes' in conf:
processes = conf.pop('processes')
workers = int(processes) - 1
if workers > 0:
options += ['--workers', str(workers)]
for key, value in conf.iteritems():
options += ['--%s' % key, str(value)]
with open(os.path.join(SHARE_PATH, 'init.d.j2'), 'r') as f:
TEMPLATE_FILE = f.read()
template = jinja2.Template(TEMPLATE_FILE)
initd_content = template.render(
{
'venv_path': virtualenv,
'project_path': project_path,
'hendrix_opts': ' '.join(options)
}
)
return initd_content
|
python
|
def generateInitd(conf_file):
"""
Helper function to generate the text content needed to create an init.d
executable
"""
allowed_opts = [
'virtualenv', 'project_path', 'settings', 'processes',
'http_port', 'cache', 'cache_port', 'https_port', 'key', 'cert'
]
base_opts = ['--daemonize', ] # always daemonize
options = base_opts
with open(conf_file, 'r') as cfg:
conf = yaml.load(cfg)
conf_specs = set(conf.keys())
if len(conf_specs - set(allowed_opts)):
raise RuntimeError('Improperly configured.')
try:
virtualenv = conf.pop('virtualenv')
project_path = conf.pop('project_path')
except:
raise RuntimeError('Improperly configured.')
cache = False
if 'cache' in conf:
cache = conf.pop('cache')
if not cache:
options.append('--nocache')
workers = 0
if 'processes' in conf:
processes = conf.pop('processes')
workers = int(processes) - 1
if workers > 0:
options += ['--workers', str(workers)]
for key, value in conf.iteritems():
options += ['--%s' % key, str(value)]
with open(os.path.join(SHARE_PATH, 'init.d.j2'), 'r') as f:
TEMPLATE_FILE = f.read()
template = jinja2.Template(TEMPLATE_FILE)
initd_content = template.render(
{
'venv_path': virtualenv,
'project_path': project_path,
'hendrix_opts': ' '.join(options)
}
)
return initd_content
|
[
"def",
"generateInitd",
"(",
"conf_file",
")",
":",
"allowed_opts",
"=",
"[",
"'virtualenv'",
",",
"'project_path'",
",",
"'settings'",
",",
"'processes'",
",",
"'http_port'",
",",
"'cache'",
",",
"'cache_port'",
",",
"'https_port'",
",",
"'key'",
",",
"'cert'",
"]",
"base_opts",
"=",
"[",
"'--daemonize'",
",",
"]",
"# always daemonize",
"options",
"=",
"base_opts",
"with",
"open",
"(",
"conf_file",
",",
"'r'",
")",
"as",
"cfg",
":",
"conf",
"=",
"yaml",
".",
"load",
"(",
"cfg",
")",
"conf_specs",
"=",
"set",
"(",
"conf",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"conf_specs",
"-",
"set",
"(",
"allowed_opts",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'Improperly configured.'",
")",
"try",
":",
"virtualenv",
"=",
"conf",
".",
"pop",
"(",
"'virtualenv'",
")",
"project_path",
"=",
"conf",
".",
"pop",
"(",
"'project_path'",
")",
"except",
":",
"raise",
"RuntimeError",
"(",
"'Improperly configured.'",
")",
"cache",
"=",
"False",
"if",
"'cache'",
"in",
"conf",
":",
"cache",
"=",
"conf",
".",
"pop",
"(",
"'cache'",
")",
"if",
"not",
"cache",
":",
"options",
".",
"append",
"(",
"'--nocache'",
")",
"workers",
"=",
"0",
"if",
"'processes'",
"in",
"conf",
":",
"processes",
"=",
"conf",
".",
"pop",
"(",
"'processes'",
")",
"workers",
"=",
"int",
"(",
"processes",
")",
"-",
"1",
"if",
"workers",
">",
"0",
":",
"options",
"+=",
"[",
"'--workers'",
",",
"str",
"(",
"workers",
")",
"]",
"for",
"key",
",",
"value",
"in",
"conf",
".",
"iteritems",
"(",
")",
":",
"options",
"+=",
"[",
"'--%s'",
"%",
"key",
",",
"str",
"(",
"value",
")",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"SHARE_PATH",
",",
"'init.d.j2'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"TEMPLATE_FILE",
"=",
"f",
".",
"read",
"(",
")",
"template",
"=",
"jinja2",
".",
"Template",
"(",
"TEMPLATE_FILE",
")",
"initd_content",
"=",
"template",
".",
"render",
"(",
"{",
"'venv_path'",
":",
"virtualenv",
",",
"'project_path'",
":",
"project_path",
",",
"'hendrix_opts'",
":",
"' '",
".",
"join",
"(",
"options",
")",
"}",
")",
"return",
"initd_content"
] |
Helper function to generate the text content needed to create an init.d
executable
|
[
"Helper",
"function",
"to",
"generate",
"the",
"text",
"content",
"needed",
"to",
"create",
"an",
"init",
".",
"d",
"executable"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/utils/conf.py#L9-L61
|
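generateInitd only accepts keys from allowed_opts and requires virtualenv and project_path; a hypothetical hendrix.yml that satisfies those rules, parsed the same way the function does (safe_load here instead of the record's yaml.load), is sketched below.

import yaml

# Hypothetical configuration; every key is in allowed_opts, and the two
# mandatory entries are present, so generateInitd would not raise.
conf_text = """
virtualenv: /home/deploy/venvs/mysite
project_path: /home/deploy/mysite
settings: mysite.settings
processes: 4
http_port: 8000
cache: false
"""

conf = yaml.safe_load(conf_text)
workers = int(conf['processes']) - 1          # 4 processes -> 3 extra --workers
print(sorted(conf.keys()), workers)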
18,474
|
hendrix/hendrix
|
hendrix/facilities/response.py
|
LoudWSGIResponse.startResponse
|
def startResponse(self, status, headers, excInfo=None):
"""
extends startResponse to call speakerBox in a thread
"""
self.status = status
self.headers = headers
self.reactor.callInThread(
responseInColor, self.request, status, headers
)
return self.write
|
python
|
def startResponse(self, status, headers, excInfo=None):
"""
extends startResponse to call speakerBox in a thread
"""
self.status = status
self.headers = headers
self.reactor.callInThread(
responseInColor, self.request, status, headers
)
return self.write
|
[
"def",
"startResponse",
"(",
"self",
",",
"status",
",",
"headers",
",",
"excInfo",
"=",
"None",
")",
":",
"self",
".",
"status",
"=",
"status",
"self",
".",
"headers",
"=",
"headers",
"self",
".",
"reactor",
".",
"callInThread",
"(",
"responseInColor",
",",
"self",
".",
"request",
",",
"status",
",",
"headers",
")",
"return",
"self",
".",
"write"
] |
extends startResponse to call speakerBox in a thread
|
[
"extends",
"startResponse",
"to",
"call",
"speakerBox",
"in",
"a",
"thread"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/facilities/response.py#L41-L50
|
18,475
|
hendrix/hendrix
|
hendrix/contrib/cache/backends/__init__.py
|
CacheBackend.cacheContent
|
def cacheContent(self, request, response, buffer):
"""
Checks if the response should be cached.
Caches the content in a gzipped format given that a `cache_it` flag is
True
    To be used by CacheClient
"""
content = buffer.getvalue()
code = int(response.code)
cache_it = False
uri, bust = self.processURI(request.uri, PREFIX)
# Conditions for adding uri response to cache:
# * if it was successful i.e. status of in the 200s
# * requested using GET
# * not busted
if request.method == "GET" and code / 100 == 2 and not bust:
cache_control = response.headers.get('cache-control')
if cache_control:
params = dict(urlparse.parse_qsl(cache_control))
if int(params.get('max-age', '0')) > 0:
cache_it = True
if cache_it:
content = compressBuffer(content)
self.addResource(content, uri, response.headers)
buffer.close()
|
python
|
def cacheContent(self, request, response, buffer):
"""
Checks if the response should be cached.
Caches the content in a gzipped format given that a `cache_it` flag is
True
    To be used by CacheClient
"""
content = buffer.getvalue()
code = int(response.code)
cache_it = False
uri, bust = self.processURI(request.uri, PREFIX)
# Conditions for adding uri response to cache:
# * if it was successful i.e. status of in the 200s
# * requested using GET
# * not busted
if request.method == "GET" and code / 100 == 2 and not bust:
cache_control = response.headers.get('cache-control')
if cache_control:
params = dict(urlparse.parse_qsl(cache_control))
if int(params.get('max-age', '0')) > 0:
cache_it = True
if cache_it:
content = compressBuffer(content)
self.addResource(content, uri, response.headers)
buffer.close()
|
[
"def",
"cacheContent",
"(",
"self",
",",
"request",
",",
"response",
",",
"buffer",
")",
":",
"content",
"=",
"buffer",
".",
"getvalue",
"(",
")",
"code",
"=",
"int",
"(",
"response",
".",
"code",
")",
"cache_it",
"=",
"False",
"uri",
",",
"bust",
"=",
"self",
".",
"processURI",
"(",
"request",
".",
"uri",
",",
"PREFIX",
")",
"# Conditions for adding uri response to cache:",
"# * if it was successful i.e. status of in the 200s",
"# * requested using GET",
"# * not busted",
"if",
"request",
".",
"method",
"==",
"\"GET\"",
"and",
"code",
"/",
"100",
"==",
"2",
"and",
"not",
"bust",
":",
"cache_control",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"'cache-control'",
")",
"if",
"cache_control",
":",
"params",
"=",
"dict",
"(",
"urlparse",
".",
"parse_qsl",
"(",
"cache_control",
")",
")",
"if",
"int",
"(",
"params",
".",
"get",
"(",
"'max-age'",
",",
"'0'",
")",
")",
">",
"0",
":",
"cache_it",
"=",
"True",
"if",
"cache_it",
":",
"content",
"=",
"compressBuffer",
"(",
"content",
")",
"self",
".",
"addResource",
"(",
"content",
",",
"uri",
",",
"response",
".",
"headers",
")",
"buffer",
".",
"close",
"(",
")"
] |
Checks if the response should be cached.
Caches the content in a gzipped format given that a `cache_it` flag is
True
To be used by CacheClient
|
[
"Checks",
"if",
"the",
"response",
"should",
"be",
"cached",
".",
"Caches",
"the",
"content",
"in",
"a",
"gzipped",
"format",
"given",
"that",
"a",
"cache_it",
"flag",
"is",
"True",
"To",
"be",
"used",
"CacheClient"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/cache/backends/__init__.py#L70-L94
|
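The max-age test in cacheContent treats the Cache-Control header like a query string; the parsing step can be seen in isolation below (urllib.parse stands in for the Python 2 urlparse module; the header values are hypothetical single-directive examples).

from urllib.parse import parse_qsl

def max_age(cache_control_header):
    # Same parsing as cacheContent: split "key=value" pairs, read max-age.
    params = dict(parse_qsl(cache_control_header))
    return int(params.get('max-age', '0'))

print(max_age('max-age=3600'))   # 3600 -> the response would be cached
print(max_age('no-cache'))       # 0    -> caching is skipped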
18,476
|
hendrix/hendrix
|
hendrix/facilities/gather.py
|
get_additional_services
|
def get_additional_services(settings_module):
"""
if HENDRIX_SERVICES is specified in settings_module,
    it should be a list of twisted internet services
example:
HENDRIX_SERVICES = (
('myServiceName', 'apps.offload.services.TimeService'),
)
"""
additional_services = []
if hasattr(settings_module, 'HENDRIX_SERVICES'):
for name, module_path in settings_module.HENDRIX_SERVICES:
path_to_module, service_name = module_path.rsplit('.', 1)
resource_module = importlib.import_module(path_to_module)
additional_services.append(
(name, getattr(resource_module, service_name))
)
return additional_services
|
python
|
def get_additional_services(settings_module):
"""
if HENDRIX_SERVICES is specified in settings_module,
    it should be a list of twisted internet services
example:
HENDRIX_SERVICES = (
('myServiceName', 'apps.offload.services.TimeService'),
)
"""
additional_services = []
if hasattr(settings_module, 'HENDRIX_SERVICES'):
for name, module_path in settings_module.HENDRIX_SERVICES:
path_to_module, service_name = module_path.rsplit('.', 1)
resource_module = importlib.import_module(path_to_module)
additional_services.append(
(name, getattr(resource_module, service_name))
)
return additional_services
|
[
"def",
"get_additional_services",
"(",
"settings_module",
")",
":",
"additional_services",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"settings_module",
",",
"'HENDRIX_SERVICES'",
")",
":",
"for",
"name",
",",
"module_path",
"in",
"settings_module",
".",
"HENDRIX_SERVICES",
":",
"path_to_module",
",",
"service_name",
"=",
"module_path",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"resource_module",
"=",
"importlib",
".",
"import_module",
"(",
"path_to_module",
")",
"additional_services",
".",
"append",
"(",
"(",
"name",
",",
"getattr",
"(",
"resource_module",
",",
"service_name",
")",
")",
")",
"return",
"additional_services"
] |
if HENDRIX_SERVICES is specified in settings_module,
it should be a list of twisted internet services
example:
HENDRIX_SERVICES = (
('myServiceName', 'apps.offload.services.TimeService'),
)
|
[
"if",
"HENDRIX_SERVICES",
"is",
"specified",
"in",
"settings_module",
"it",
"should",
"be",
"a",
"list",
"twisted",
"internet",
"services"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/facilities/gather.py#L4-L25
|
18,477
|
hendrix/hendrix
|
hendrix/facilities/gather.py
|
get_additional_resources
|
def get_additional_resources(settings_module):
"""
if HENDRIX_CHILD_RESOURCES is specified in settings_module,
    it should be a list of resources subclassed from hendrix.contrib.NamedResource
example:
HENDRIX_CHILD_RESOURCES = (
'apps.offload.resources.LongRunningProcessResource',
'apps.chat.resources.ChatResource',
)
"""
additional_resources = []
if hasattr(settings_module, 'HENDRIX_CHILD_RESOURCES'):
for module_path in settings_module.HENDRIX_CHILD_RESOURCES:
path_to_module, resource_name = module_path.rsplit('.', 1)
resource_module = importlib.import_module(path_to_module)
additional_resources.append(
getattr(resource_module, resource_name)
)
return additional_resources
|
python
|
def get_additional_resources(settings_module):
"""
if HENDRIX_CHILD_RESOURCES is specified in settings_module,
    it should be a list of resources subclassed from hendrix.contrib.NamedResource
example:
HENDRIX_CHILD_RESOURCES = (
'apps.offload.resources.LongRunningProcessResource',
'apps.chat.resources.ChatResource',
)
"""
additional_resources = []
if hasattr(settings_module, 'HENDRIX_CHILD_RESOURCES'):
for module_path in settings_module.HENDRIX_CHILD_RESOURCES:
path_to_module, resource_name = module_path.rsplit('.', 1)
resource_module = importlib.import_module(path_to_module)
additional_resources.append(
getattr(resource_module, resource_name)
)
return additional_resources
|
[
"def",
"get_additional_resources",
"(",
"settings_module",
")",
":",
"additional_resources",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"settings_module",
",",
"'HENDRIX_CHILD_RESOURCES'",
")",
":",
"for",
"module_path",
"in",
"settings_module",
".",
"HENDRIX_CHILD_RESOURCES",
":",
"path_to_module",
",",
"resource_name",
"=",
"module_path",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"resource_module",
"=",
"importlib",
".",
"import_module",
"(",
"path_to_module",
")",
"additional_resources",
".",
"append",
"(",
"getattr",
"(",
"resource_module",
",",
"resource_name",
")",
")",
"return",
"additional_resources"
] |
if HENDRIX_CHILD_RESOURCES is specified in settings_module,
it should be a list of resources subclassed from hendrix.contrib.NamedResource
example:
HENDRIX_CHILD_RESOURCES = (
'apps.offload.resources.LongRunningProcessResource',
'apps.chat.resources.ChatResource',
)
|
[
"if",
"HENDRIX_CHILD_RESOURCES",
"is",
"specified",
"in",
"settings_module",
"it",
"should",
"be",
"a",
"list",
"resources",
"subclassed",
"from",
"hendrix",
".",
"contrib",
".",
"NamedResource"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/facilities/gather.py#L28-L52
|
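Both helpers above resolve a dotted path with rsplit plus importlib; the same mechanism in isolation, pointed at a standard-library class so the sketch stays runnable (the settings-style path in the comment is hypothetical).

import importlib

def resolve(dotted_path):
    # Mirrors get_additional_services / get_additional_resources: split
    # "package.module.Attribute" once from the right, import the module
    # part, then fetch the attribute by name.
    module_path, attr_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)

# Stands in for something like 'apps.chat.resources.ChatResource'.
print(resolve('collections.OrderedDict'))   # <class 'collections.OrderedDict'>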
18,478
|
hendrix/hendrix
|
hendrix/deploy/base.py
|
HendrixDeploy.getConf
|
def getConf(cls, settings, options):
"updates the options dict to use config options in the settings module"
ports = ['http_port', 'https_port', 'cache_port']
for port_name in ports:
port = getattr(settings, port_name.upper(), None)
# only use the settings ports if the defaults were left unchanged
default = getattr(defaults, port_name.upper())
if port and options.get(port_name) == default:
options[port_name] = port
_opts = [
('key', 'hx_private_key'),
('cert', 'hx_certficate'),
('wsgi', 'wsgi_application')
]
for opt_name, settings_name in _opts:
opt = getattr(settings, settings_name.upper(), None)
if opt:
options[opt_name] = opt
if not options['settings']:
options['settings'] = environ['DJANGO_SETTINGS_MODULE']
return options
|
python
|
def getConf(cls, settings, options):
"updates the options dict to use config options in the settings module"
ports = ['http_port', 'https_port', 'cache_port']
for port_name in ports:
port = getattr(settings, port_name.upper(), None)
# only use the settings ports if the defaults were left unchanged
default = getattr(defaults, port_name.upper())
if port and options.get(port_name) == default:
options[port_name] = port
_opts = [
('key', 'hx_private_key'),
('cert', 'hx_certficate'),
('wsgi', 'wsgi_application')
]
for opt_name, settings_name in _opts:
opt = getattr(settings, settings_name.upper(), None)
if opt:
options[opt_name] = opt
if not options['settings']:
options['settings'] = environ['DJANGO_SETTINGS_MODULE']
return options
|
[
"def",
"getConf",
"(",
"cls",
",",
"settings",
",",
"options",
")",
":",
"ports",
"=",
"[",
"'http_port'",
",",
"'https_port'",
",",
"'cache_port'",
"]",
"for",
"port_name",
"in",
"ports",
":",
"port",
"=",
"getattr",
"(",
"settings",
",",
"port_name",
".",
"upper",
"(",
")",
",",
"None",
")",
"# only use the settings ports if the defaults were left unchanged",
"default",
"=",
"getattr",
"(",
"defaults",
",",
"port_name",
".",
"upper",
"(",
")",
")",
"if",
"port",
"and",
"options",
".",
"get",
"(",
"port_name",
")",
"==",
"default",
":",
"options",
"[",
"port_name",
"]",
"=",
"port",
"_opts",
"=",
"[",
"(",
"'key'",
",",
"'hx_private_key'",
")",
",",
"(",
"'cert'",
",",
"'hx_certficate'",
")",
",",
"(",
"'wsgi'",
",",
"'wsgi_application'",
")",
"]",
"for",
"opt_name",
",",
"settings_name",
"in",
"_opts",
":",
"opt",
"=",
"getattr",
"(",
"settings",
",",
"settings_name",
".",
"upper",
"(",
")",
",",
"None",
")",
"if",
"opt",
":",
"options",
"[",
"opt_name",
"]",
"=",
"opt",
"if",
"not",
"options",
"[",
"'settings'",
"]",
":",
"options",
"[",
"'settings'",
"]",
"=",
"environ",
"[",
"'DJANGO_SETTINGS_MODULE'",
"]",
"return",
"options"
] |
updates the options dict to use config options in the settings module
|
[
"updates",
"the",
"options",
"dict",
"to",
"use",
"config",
"options",
"in",
"the",
"settings",
"module"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/base.py#L99-L121
|
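A small sketch of the port-precedence rule in getConf: a settings value only wins when the command-line option still equals the default. The settings object, defaults object and port numbers are hypothetical stand-ins for the real modules.

from types import SimpleNamespace

settings = SimpleNamespace(HTTP_PORT=8080)   # hypothetical Django settings
defaults = SimpleNamespace(HTTP_PORT=8000)   # hypothetical hendrix defaults

def apply_http_port(options):
    # Same rule as getConf: honour settings.HTTP_PORT only if the user
    # left the --http_port option at its default value.
    port = getattr(settings, 'HTTP_PORT', None)
    if port and options.get('http_port') == defaults.HTTP_PORT:
        options['http_port'] = port
    return options

print(apply_http_port({'http_port': 8000}))   # {'http_port': 8080} -> settings win
print(apply_http_port({'http_port': 9000}))   # {'http_port': 9000} -> CLI value kept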
18,479
|
hendrix/hendrix
|
hendrix/deploy/base.py
|
HendrixDeploy.addHendrix
|
def addHendrix(self):
'''
Instantiates a HendrixService with this object's threadpool.
It will be added as a service later.
'''
self.hendrix = HendrixService(
self.application,
threadpool=self.getThreadPool(),
resources=self.resources,
services=self.services,
loud=self.options['loud']
)
if self.options["https_only"] is not True:
self.hendrix.spawn_new_server(self.options['http_port'], HendrixTCPService)
|
python
|
def addHendrix(self):
'''
Instantiates a HendrixService with this object's threadpool.
It will be added as a service later.
'''
self.hendrix = HendrixService(
self.application,
threadpool=self.getThreadPool(),
resources=self.resources,
services=self.services,
loud=self.options['loud']
)
if self.options["https_only"] is not True:
self.hendrix.spawn_new_server(self.options['http_port'], HendrixTCPService)
|
[
"def",
"addHendrix",
"(",
"self",
")",
":",
"self",
".",
"hendrix",
"=",
"HendrixService",
"(",
"self",
".",
"application",
",",
"threadpool",
"=",
"self",
".",
"getThreadPool",
"(",
")",
",",
"resources",
"=",
"self",
".",
"resources",
",",
"services",
"=",
"self",
".",
"services",
",",
"loud",
"=",
"self",
".",
"options",
"[",
"'loud'",
"]",
")",
"if",
"self",
".",
"options",
"[",
"\"https_only\"",
"]",
"is",
"not",
"True",
":",
"self",
".",
"hendrix",
".",
"spawn_new_server",
"(",
"self",
".",
"options",
"[",
"'http_port'",
"]",
",",
"HendrixTCPService",
")"
] |
Instantiates a HendrixService with this object's threadpool.
It will be added as a service later.
|
[
"Instantiates",
"a",
"HendrixService",
"with",
"this",
"object",
"s",
"threadpool",
".",
"It",
"will",
"be",
"added",
"as",
"a",
"service",
"later",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/base.py#L144-L157
|
18,480
|
hendrix/hendrix
|
hendrix/deploy/base.py
|
HendrixDeploy.catalogServers
|
def catalogServers(self, hendrix):
"collects a list of service names serving on TCP or SSL"
for service in hendrix.services:
if isinstance(service, (TCPServer, SSLServer)):
self.servers.append(service.name)
|
python
|
def catalogServers(self, hendrix):
"collects a list of service names serving on TCP or SSL"
for service in hendrix.services:
if isinstance(service, (TCPServer, SSLServer)):
self.servers.append(service.name)
|
[
"def",
"catalogServers",
"(",
"self",
",",
"hendrix",
")",
":",
"for",
"service",
"in",
"hendrix",
".",
"services",
":",
"if",
"isinstance",
"(",
"service",
",",
"(",
"TCPServer",
",",
"SSLServer",
")",
")",
":",
"self",
".",
"servers",
".",
"append",
"(",
"service",
".",
"name",
")"
] |
collects a list of service names serving on TCP or SSL
|
[
"collects",
"a",
"list",
"of",
"service",
"names",
"serving",
"on",
"TCP",
"or",
"SSL"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/base.py#L159-L163
|
18,481
|
hendrix/hendrix
|
hendrix/deploy/base.py
|
HendrixDeploy.run
|
def run(self):
"sets up the desired services and runs the requested action"
self.addServices()
self.catalogServers(self.hendrix)
action = self.action
fd = self.options['fd']
if action.startswith('start'):
chalk.blue(self._listening_message())
getattr(self, action)(fd)
###########################
# annnnd run the reactor! #
###########################
try:
self.reactor.run()
finally:
shutil.rmtree(PID_DIR, ignore_errors=True) # cleanup tmp PID dir
elif action == 'restart':
getattr(self, action)(fd=fd)
else:
getattr(self, action)()
|
python
|
def run(self):
"sets up the desired services and runs the requested action"
self.addServices()
self.catalogServers(self.hendrix)
action = self.action
fd = self.options['fd']
if action.startswith('start'):
chalk.blue(self._listening_message())
getattr(self, action)(fd)
###########################
# annnnd run the reactor! #
###########################
try:
self.reactor.run()
finally:
shutil.rmtree(PID_DIR, ignore_errors=True) # cleanup tmp PID dir
elif action == 'restart':
getattr(self, action)(fd=fd)
else:
getattr(self, action)()
|
[
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"addServices",
"(",
")",
"self",
".",
"catalogServers",
"(",
"self",
".",
"hendrix",
")",
"action",
"=",
"self",
".",
"action",
"fd",
"=",
"self",
".",
"options",
"[",
"'fd'",
"]",
"if",
"action",
".",
"startswith",
"(",
"'start'",
")",
":",
"chalk",
".",
"blue",
"(",
"self",
".",
"_listening_message",
"(",
")",
")",
"getattr",
"(",
"self",
",",
"action",
")",
"(",
"fd",
")",
"###########################",
"# annnnd run the reactor! #",
"###########################",
"try",
":",
"self",
".",
"reactor",
".",
"run",
"(",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"PID_DIR",
",",
"ignore_errors",
"=",
"True",
")",
"# cleanup tmp PID dir",
"elif",
"action",
"==",
"'restart'",
":",
"getattr",
"(",
"self",
",",
"action",
")",
"(",
"fd",
"=",
"fd",
")",
"else",
":",
"getattr",
"(",
"self",
",",
"action",
")",
"(",
")"
] |
sets up the desired services and runs the requested action
|
[
"sets",
"up",
"the",
"desired",
"services",
"and",
"runs",
"the",
"requested",
"action"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/base.py#L169-L191
|
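The `run` method above resolves the requested action name to a bound method with `getattr` and calls it, passing the inherited file descriptor where relevant. A minimal standalone sketch of that dispatch pattern (the `Deploy` class and its actions below are hypothetical, not part of hendrix):

```python
class Deploy:
    """Toy dispatcher mirroring the getattr-based action lookup in run()."""

    def start(self, fd=None):
        print("starting, inherited fd:", fd)

    def restart(self, fd=None):
        print("restarting, inherited fd:", fd)

    def run(self, action, fd=None):
        # Resolve the action name to a bound method and invoke it.
        handler = getattr(self, action)
        handler(fd)


Deploy().run("start")       # -> starting, inherited fd: None
Deploy().run("restart", 3)  # -> restarting, inherited fd: 3
```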
18,482
|
hendrix/hendrix
|
hendrix/deploy/base.py
|
HendrixDeploy.setFDs
|
def setFDs(self):
"""
Iterator for file descriptors.
Seperated from launchworkers for clarity and readability.
"""
# 0 corresponds to stdin, 1 to stdout, 2 to stderr
self.childFDs = {0: 0, 1: 1, 2: 2}
self.fds = {}
for name in self.servers:
self.port = self.hendrix.get_port(name)
fd = self.port.fileno()
self.childFDs[fd] = fd
self.fds[name] = fd
|
python
|
def setFDs(self):
"""
Iterator for file descriptors.
Seperated from launchworkers for clarity and readability.
"""
# 0 corresponds to stdin, 1 to stdout, 2 to stderr
self.childFDs = {0: 0, 1: 1, 2: 2}
self.fds = {}
for name in self.servers:
self.port = self.hendrix.get_port(name)
fd = self.port.fileno()
self.childFDs[fd] = fd
self.fds[name] = fd
|
[
"def",
"setFDs",
"(",
"self",
")",
":",
"# 0 corresponds to stdin, 1 to stdout, 2 to stderr",
"self",
".",
"childFDs",
"=",
"{",
"0",
":",
"0",
",",
"1",
":",
"1",
",",
"2",
":",
"2",
"}",
"self",
".",
"fds",
"=",
"{",
"}",
"for",
"name",
"in",
"self",
".",
"servers",
":",
"self",
".",
"port",
"=",
"self",
".",
"hendrix",
".",
"get_port",
"(",
"name",
")",
"fd",
"=",
"self",
".",
"port",
".",
"fileno",
"(",
")",
"self",
".",
"childFDs",
"[",
"fd",
"]",
"=",
"fd",
"self",
".",
"fds",
"[",
"name",
"]",
"=",
"fd"
] |
Iterator for file descriptors.
Seperated from launchworkers for clarity and readability.
|
[
"Iterator",
"for",
"file",
"descriptors",
".",
"Seperated",
"from",
"launchworkers",
"for",
"clarity",
"and",
"readability",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/base.py#L240-L252
|
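`setFDs` builds the file-descriptor map used when spawning workers: stdin, stdout, and stderr map to themselves, and each listening port's descriptor is passed through unchanged so child processes can adopt it. A standalone sketch of the same mapping with a plain listening socket (the port and server name are illustrative):

```python
import socket

# Open an illustrative listening socket to obtain a real file descriptor.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))  # port 0: let the OS pick a free port
sock.listen(5)

child_fds = {0: 0, 1: 1, 2: 2}  # stdin, stdout, stderr pass straight through
fds = {}
for name in ["main_web_tcp"]:   # server name is illustrative
    fd = sock.fileno()
    child_fds[fd] = fd          # hand the same descriptor to the child
    fds[name] = fd

print(child_fds, fds)
sock.close()
```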
18,483
|
hendrix/hendrix
|
hendrix/deploy/base.py
|
HendrixDeploy.addSubprocess
|
def addSubprocess(self, fds, name, factory):
"""
Public method for _addSubprocess.
Wraps reactor.adoptStreamConnection in
a simple DeferredLock to guarantee
workers play well together.
"""
self._lock.run(self._addSubprocess, self, fds, name, factory)
|
python
|
def addSubprocess(self, fds, name, factory):
"""
Public method for _addSubprocess.
Wraps reactor.adoptStreamConnection in
a simple DeferredLock to guarantee
workers play well together.
"""
self._lock.run(self._addSubprocess, self, fds, name, factory)
|
[
"def",
"addSubprocess",
"(",
"self",
",",
"fds",
",",
"name",
",",
"factory",
")",
":",
"self",
".",
"_lock",
".",
"run",
"(",
"self",
".",
"_addSubprocess",
",",
"self",
",",
"fds",
",",
"name",
",",
"factory",
")"
] |
Public method for _addSubprocess.
Wraps reactor.adoptStreamConnection in
a simple DeferredLock to guarantee
workers play well together.
|
[
"Public",
"method",
"for",
"_addSubprocess",
".",
"Wraps",
"reactor",
".",
"adoptStreamConnection",
"in",
"a",
"simple",
"DeferredLock",
"to",
"guarantee",
"workers",
"play",
"well",
"together",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/base.py#L273-L280
|
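`addSubprocess` serialises the adoption of stream connections through a Twisted `DeferredLock`, so workers are added one at a time. A minimal sketch of the `DeferredLock.run` pattern, assuming Twisted is installed (the worker function here is hypothetical):

```python
from twisted.internet.defer import DeferredLock

lock = DeferredLock()

def adopt_worker(name):
    # Stand-in for the real adoption step; runs only while the lock is held.
    print("adopting", name)
    return name

# run() acquires the lock, calls the function, releases the lock, and
# returns a Deferred that fires with the function's result.
d = lock.run(adopt_worker, "worker-1")
d.addCallback(lambda result: print("done:", result))
```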
18,484
|
hendrix/hendrix
|
hendrix/deploy/base.py
|
HendrixDeploy.disownService
|
def disownService(self, name):
"""
disowns a service on hendirix by name
returns a factory for use in the adoptStreamPort part of setting up
multiple processes
"""
_service = self.hendrix.getServiceNamed(name)
_service.disownServiceParent()
return _service.factory
|
python
|
def disownService(self, name):
"""
disowns a service on hendirix by name
returns a factory for use in the adoptStreamPort part of setting up
multiple processes
"""
_service = self.hendrix.getServiceNamed(name)
_service.disownServiceParent()
return _service.factory
|
[
"def",
"disownService",
"(",
"self",
",",
"name",
")",
":",
"_service",
"=",
"self",
".",
"hendrix",
".",
"getServiceNamed",
"(",
"name",
")",
"_service",
".",
"disownServiceParent",
"(",
")",
"return",
"_service",
".",
"factory"
] |
disowns a service on hendirix by name
returns a factory for use in the adoptStreamPort part of setting up
multiple processes
|
[
"disowns",
"a",
"service",
"on",
"hendirix",
"by",
"name",
"returns",
"a",
"factory",
"for",
"use",
"in",
"the",
"adoptStreamPort",
"part",
"of",
"setting",
"up",
"multiple",
"processes"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/base.py#L307-L315
|
18,485
|
hendrix/hendrix
|
hendrix/utils/__init__.py
|
get_pid
|
def get_pid(options):
"""returns The default location of the pid file for process management"""
namespace = options['settings'] if options['settings'] else options['wsgi']
return os.path.join('{}', '{}_{}.pid').format(PID_DIR, options['http_port'], namespace.replace('.', '_'))
|
python
|
def get_pid(options):
"""returns The default location of the pid file for process management"""
namespace = options['settings'] if options['settings'] else options['wsgi']
return os.path.join('{}', '{}_{}.pid').format(PID_DIR, options['http_port'], namespace.replace('.', '_'))
|
[
"def",
"get_pid",
"(",
"options",
")",
":",
"namespace",
"=",
"options",
"[",
"'settings'",
"]",
"if",
"options",
"[",
"'settings'",
"]",
"else",
"options",
"[",
"'wsgi'",
"]",
"return",
"os",
".",
"path",
".",
"join",
"(",
"'{}'",
",",
"'{}_{}.pid'",
")",
".",
"format",
"(",
"PID_DIR",
",",
"options",
"[",
"'http_port'",
"]",
",",
"namespace",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
")"
] |
returns The default location of the pid file for process management
|
[
"returns",
"The",
"default",
"location",
"of",
"the",
"pid",
"file",
"for",
"process",
"management"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/utils/__init__.py#L21-L24
|
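`get_pid` composes the pid-file path in two stages: `os.path.join` first assembles a template with OS-appropriate separators, then `format` fills in the directory, port, and dotted namespace (with dots swapped for underscores). A standalone sketch of the same composition with illustrative values (the `PID_DIR` value and options dict below are assumptions, not hendrix defaults):

```python
import os

PID_DIR = "/tmp/hendrix"  # illustrative; the real constant is defined by hendrix
options = {"settings": "myproject.settings", "wsgi": None, "http_port": 8000}

namespace = options["settings"] if options["settings"] else options["wsgi"]
pid_path = os.path.join("{}", "{}_{}.pid").format(
    PID_DIR, options["http_port"], namespace.replace(".", "_")
)
print(pid_path)  # e.g. /tmp/hendrix/8000_myproject_settings.pid
```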
18,486
|
hendrix/hendrix
|
hendrix/utils/__init__.py
|
responseInColor
|
def responseInColor(request, status, headers, prefix='Response', opts=None):
"Prints the response info in color"
code, message = status.split(None, 1)
message = '%s [%s] => Request %s %s %s on pid %d' % (
prefix,
code,
str(request.host),
request.method,
request.path,
os.getpid()
)
signal = int(code) / 100
if signal == 2:
chalk.green(message, opts=opts)
elif signal == 3:
chalk.blue(message, opts=opts)
else:
chalk.red(message, opts=opts)
|
python
|
def responseInColor(request, status, headers, prefix='Response', opts=None):
"Prints the response info in color"
code, message = status.split(None, 1)
message = '%s [%s] => Request %s %s %s on pid %d' % (
prefix,
code,
str(request.host),
request.method,
request.path,
os.getpid()
)
signal = int(code) / 100
if signal == 2:
chalk.green(message, opts=opts)
elif signal == 3:
chalk.blue(message, opts=opts)
else:
chalk.red(message, opts=opts)
|
[
"def",
"responseInColor",
"(",
"request",
",",
"status",
",",
"headers",
",",
"prefix",
"=",
"'Response'",
",",
"opts",
"=",
"None",
")",
":",
"code",
",",
"message",
"=",
"status",
".",
"split",
"(",
"None",
",",
"1",
")",
"message",
"=",
"'%s [%s] => Request %s %s %s on pid %d'",
"%",
"(",
"prefix",
",",
"code",
",",
"str",
"(",
"request",
".",
"host",
")",
",",
"request",
".",
"method",
",",
"request",
".",
"path",
",",
"os",
".",
"getpid",
"(",
")",
")",
"signal",
"=",
"int",
"(",
"code",
")",
"/",
"100",
"if",
"signal",
"==",
"2",
":",
"chalk",
".",
"green",
"(",
"message",
",",
"opts",
"=",
"opts",
")",
"elif",
"signal",
"==",
"3",
":",
"chalk",
".",
"blue",
"(",
"message",
",",
"opts",
"=",
"opts",
")",
"else",
":",
"chalk",
".",
"red",
"(",
"message",
",",
"opts",
"=",
"opts",
")"
] |
Prints the response info in color
|
[
"Prints",
"the",
"response",
"info",
"in",
"color"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/utils/__init__.py#L27-L44
|
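`responseInColor` buckets the status code by dividing by 100: 2xx prints green, 3xx blue, everything else red. Note that under Python 3 `int(code) / 100` is float division, so only exact multiples of 100 compare equal to 2 or 3. A standalone sketch of the bucketing using floor division, which keeps the intended behaviour for codes such as 201 or 304:

```python
def colour_for_status(code: int) -> str:
    """Map an HTTP status code to the colour used for console output."""
    signal = code // 100  # floor division keeps 201 -> 2, 304 -> 3, etc.
    if signal == 2:
        return "green"
    elif signal == 3:
        return "blue"
    return "red"

for status in (200, 201, 304, 404, 500):
    print(status, colour_for_status(status))
```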
18,487
|
hendrix/hendrix
|
hendrix/deploy/cache.py
|
HendrixDeployCache.addLocalCacheService
|
def addLocalCacheService(self):
"adds a CacheService to the instatiated HendrixService"
_cache = self.getCacheService()
_cache.setName('cache_proxy')
_cache.setServiceParent(self.hendrix)
|
python
|
def addLocalCacheService(self):
"adds a CacheService to the instatiated HendrixService"
_cache = self.getCacheService()
_cache.setName('cache_proxy')
_cache.setServiceParent(self.hendrix)
|
[
"def",
"addLocalCacheService",
"(",
"self",
")",
":",
"_cache",
"=",
"self",
".",
"getCacheService",
"(",
")",
"_cache",
".",
"setName",
"(",
"'cache_proxy'",
")",
"_cache",
".",
"setServiceParent",
"(",
"self",
".",
"hendrix",
")"
] |
adds a CacheService to the instatiated HendrixService
|
[
"adds",
"a",
"CacheService",
"to",
"the",
"instatiated",
"HendrixService"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/cache.py#L24-L28
|
18,488
|
hendrix/hendrix
|
hendrix/deploy/cache.py
|
HendrixDeployCache.addGlobalServices
|
def addGlobalServices(self):
"""
This is where we put service that we don't want to be duplicated on
worker subprocesses
"""
if self.options.get('global_cache') and self.options.get('cache'):
# only add the cache service here if the global_cache and cache
# options were set to True
_cache = self.getCacheService()
_cache.startService()
|
python
|
def addGlobalServices(self):
"""
This is where we put service that we don't want to be duplicated on
worker subprocesses
"""
if self.options.get('global_cache') and self.options.get('cache'):
# only add the cache service here if the global_cache and cache
# options were set to True
_cache = self.getCacheService()
_cache.startService()
|
[
"def",
"addGlobalServices",
"(",
"self",
")",
":",
"if",
"self",
".",
"options",
".",
"get",
"(",
"'global_cache'",
")",
"and",
"self",
".",
"options",
".",
"get",
"(",
"'cache'",
")",
":",
"# only add the cache service here if the global_cache and cache",
"# options were set to True",
"_cache",
"=",
"self",
".",
"getCacheService",
"(",
")",
"_cache",
".",
"startService",
"(",
")"
] |
This is where we put service that we don't want to be duplicated on
worker subprocesses
|
[
"This",
"is",
"where",
"we",
"put",
"service",
"that",
"we",
"don",
"t",
"want",
"to",
"be",
"duplicated",
"on",
"worker",
"subprocesses"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/deploy/cache.py#L38-L47
|
18,489
|
hendrix/hendrix
|
hendrix/facilities/resources.py
|
HendrixResource.putNamedChild
|
def putNamedChild(self, res):
"""
putNamedChild takes either an instance of hendrix.contrib.NamedResource
or any resource.Resource with a "namespace" attribute as a means of
allowing application level control of resource namespacing.
if a child is already found at an existing path,
resources with paths that are children of those physical paths
will be added as children of those resources
"""
try:
EmptyResource = resource.Resource
namespace = res.namespace
parts = namespace.strip('/').split('/')
# initialise parent and children
parent = self
children = self.children
# loop through all of the path parts except for the last one
for name in parts[:-1]:
child = children.get(name)
if not child:
# if the child does not exist then create an empty one
# and associate it to the parent
child = EmptyResource()
parent.putChild(name, child)
# update parent and children for the next iteration
parent = child
children = parent.children
name = parts[-1] # get the path part that we care about
if children.get(name):
self.logger.warn(
'A resource already exists at this path. Check '
'your resources list to ensure each path is '
'unique. The previous resource will be overridden.'
)
parent.putChild(name, res)
except AttributeError:
# raise an attribute error if the resource `res` doesn't contain
# the attribute `namespace`
msg = (
'%r improperly configured. additional_resources instances must'
' have a namespace attribute'
) % resource
raise AttributeError(msg, None, sys.exc_info()[2])
|
python
|
def putNamedChild(self, res):
"""
putNamedChild takes either an instance of hendrix.contrib.NamedResource
or any resource.Resource with a "namespace" attribute as a means of
allowing application level control of resource namespacing.
if a child is already found at an existing path,
resources with paths that are children of those physical paths
will be added as children of those resources
"""
try:
EmptyResource = resource.Resource
namespace = res.namespace
parts = namespace.strip('/').split('/')
# initialise parent and children
parent = self
children = self.children
# loop through all of the path parts except for the last one
for name in parts[:-1]:
child = children.get(name)
if not child:
# if the child does not exist then create an empty one
# and associate it to the parent
child = EmptyResource()
parent.putChild(name, child)
# update parent and children for the next iteration
parent = child
children = parent.children
name = parts[-1] # get the path part that we care about
if children.get(name):
self.logger.warn(
'A resource already exists at this path. Check '
'your resources list to ensure each path is '
'unique. The previous resource will be overridden.'
)
parent.putChild(name, res)
except AttributeError:
# raise an attribute error if the resource `res` doesn't contain
# the attribute `namespace`
msg = (
'%r improperly configured. additional_resources instances must'
' have a namespace attribute'
) % resource
raise AttributeError(msg, None, sys.exc_info()[2])
|
[
"def",
"putNamedChild",
"(",
"self",
",",
"res",
")",
":",
"try",
":",
"EmptyResource",
"=",
"resource",
".",
"Resource",
"namespace",
"=",
"res",
".",
"namespace",
"parts",
"=",
"namespace",
".",
"strip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"# initialise parent and children",
"parent",
"=",
"self",
"children",
"=",
"self",
".",
"children",
"# loop through all of the path parts except for the last one",
"for",
"name",
"in",
"parts",
"[",
":",
"-",
"1",
"]",
":",
"child",
"=",
"children",
".",
"get",
"(",
"name",
")",
"if",
"not",
"child",
":",
"# if the child does not exist then create an empty one",
"# and associate it to the parent",
"child",
"=",
"EmptyResource",
"(",
")",
"parent",
".",
"putChild",
"(",
"name",
",",
"child",
")",
"# update parent and children for the next iteration",
"parent",
"=",
"child",
"children",
"=",
"parent",
".",
"children",
"name",
"=",
"parts",
"[",
"-",
"1",
"]",
"# get the path part that we care about",
"if",
"children",
".",
"get",
"(",
"name",
")",
":",
"self",
".",
"logger",
".",
"warn",
"(",
"'A resource already exists at this path. Check '",
"'your resources list to ensure each path is '",
"'unique. The previous resource will be overridden.'",
")",
"parent",
".",
"putChild",
"(",
"name",
",",
"res",
")",
"except",
"AttributeError",
":",
"# raise an attribute error if the resource `res` doesn't contain",
"# the attribute `namespace`",
"msg",
"=",
"(",
"'%r improperly configured. additional_resources instances must'",
"' have a namespace attribute'",
")",
"%",
"resource",
"raise",
"AttributeError",
"(",
"msg",
",",
"None",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")"
] |
putNamedChild takes either an instance of hendrix.contrib.NamedResource
or any resource.Resource with a "namespace" attribute as a means of
allowing application level control of resource namespacing.
if a child is already found at an existing path,
resources with paths that are children of those physical paths
will be added as children of those resources
|
[
"putNamedChild",
"takes",
"either",
"an",
"instance",
"of",
"hendrix",
".",
"contrib",
".",
"NamedResource",
"or",
"any",
"resource",
".",
"Resource",
"with",
"a",
"namespace",
"attribute",
"as",
"a",
"means",
"of",
"allowing",
"application",
"level",
"control",
"of",
"resource",
"namespacing",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/facilities/resources.py#L59-L105
|
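`putNamedChild` walks a slash-separated namespace, creating empty intermediate resources and attaching the real resource at the leaf. A minimal standalone sketch of that walk with plain `twisted.web` resources, assuming Twisted is installed (the `api/v1/ping` namespace is illustrative; recent Twisted versions expect bytes path segments):

```python
from twisted.web import resource

root = resource.Resource()
leaf = resource.Resource()

parts = "api/v1/ping".strip("/").split("/")
parent = root
for name in parts[:-1]:
    child = parent.children.get(name.encode())
    if not child:
        # Create an empty placeholder resource for the intermediate segment.
        child = resource.Resource()
        parent.putChild(name.encode(), child)
    parent = child
parent.putChild(parts[-1].encode(), leaf)

print(root.children)  # {b'api': <Resource ...>}
```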
18,490
|
hendrix/hendrix
|
hendrix/contrib/concurrency/messaging.py
|
send_json_message
|
def send_json_message(address, message, **kwargs):
"""
a shortcut for message sending
"""
data = {
'message': message,
}
if not kwargs.get('subject_id'):
data['subject_id'] = address
data.update(kwargs)
hxdispatcher.send(address, data)
|
python
|
def send_json_message(address, message, **kwargs):
"""
a shortcut for message sending
"""
data = {
'message': message,
}
if not kwargs.get('subject_id'):
data['subject_id'] = address
data.update(kwargs)
hxdispatcher.send(address, data)
|
[
"def",
"send_json_message",
"(",
"address",
",",
"message",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"{",
"'message'",
":",
"message",
",",
"}",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'subject_id'",
")",
":",
"data",
"[",
"'subject_id'",
"]",
"=",
"address",
"data",
".",
"update",
"(",
"kwargs",
")",
"hxdispatcher",
".",
"send",
"(",
"address",
",",
"data",
")"
] |
a shortcut for message sending
|
[
"a",
"shortcut",
"for",
"message",
"sending"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L125-L139
|
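`send_json_message` wraps an address and message into a dict (defaulting `subject_id` to the address) and hands it to the module-level dispatcher. A hedged usage sketch, assuming hendrix is installed, a websocket service is running, and a client has subscribed to the address below (the address and extra field are illustrative):

```python
from hendrix.contrib.concurrency.messaging import send_json_message

# Sends {'message': 'build finished', 'subject_id': 'builds', 'status': 'ok'}
# to every transport subscribed to the 'builds' address.
send_json_message("builds", "build finished", status="ok")
```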
18,491
|
hendrix/hendrix
|
hendrix/contrib/concurrency/messaging.py
|
send_callback_json_message
|
def send_callback_json_message(value, *args, **kwargs):
"""
useful for sending messages from callbacks as it puts the
result of the callback in the dict for serialization
"""
if value:
kwargs['result'] = value
send_json_message(args[0], args[1], **kwargs)
return value
|
python
|
def send_callback_json_message(value, *args, **kwargs):
"""
useful for sending messages from callbacks as it puts the
result of the callback in the dict for serialization
"""
if value:
kwargs['result'] = value
send_json_message(args[0], args[1], **kwargs)
return value
|
[
"def",
"send_callback_json_message",
"(",
"value",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"value",
":",
"kwargs",
"[",
"'result'",
"]",
"=",
"value",
"send_json_message",
"(",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
"]",
",",
"*",
"*",
"kwargs",
")",
"return",
"value"
] |
useful for sending messages from callbacks as it puts the
result of the callback in the dict for serialization
|
[
"useful",
"for",
"sending",
"messages",
"from",
"callbacks",
"as",
"it",
"puts",
"the",
"result",
"of",
"the",
"callback",
"in",
"the",
"dict",
"for",
"serialization"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L142-L153
|
18,492
|
hendrix/hendrix
|
hendrix/contrib/concurrency/messaging.py
|
RecipientManager.send
|
def send(self, message): # usually a json string...
"""
sends whatever it is to each transport
"""
for transport in self.transports.values():
transport.protocol.sendMessage(message)
|
python
|
def send(self, message): # usually a json string...
"""
sends whatever it is to each transport
"""
for transport in self.transports.values():
transport.protocol.sendMessage(message)
|
[
"def",
"send",
"(",
"self",
",",
"message",
")",
":",
"# usually a json string...",
"for",
"transport",
"in",
"self",
".",
"transports",
".",
"values",
"(",
")",
":",
"transport",
".",
"protocol",
".",
"sendMessage",
"(",
"message",
")"
] |
sends whatever it is to each transport
|
[
"sends",
"whatever",
"it",
"is",
"to",
"each",
"transport"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L34-L39
|
18,493
|
hendrix/hendrix
|
hendrix/contrib/concurrency/messaging.py
|
RecipientManager.remove
|
def remove(self, transport):
"""
removes a transport if a member of this group
"""
if transport.uid in self.transports:
del (self.transports[transport.uid])
|
python
|
def remove(self, transport):
"""
removes a transport if a member of this group
"""
if transport.uid in self.transports:
del (self.transports[transport.uid])
|
[
"def",
"remove",
"(",
"self",
",",
"transport",
")",
":",
"if",
"transport",
".",
"uid",
"in",
"self",
".",
"transports",
":",
"del",
"(",
"self",
".",
"transports",
"[",
"transport",
".",
"uid",
"]",
")"
] |
removes a transport if a member of this group
|
[
"removes",
"a",
"transport",
"if",
"a",
"member",
"of",
"this",
"group"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L41-L46
|
18,494
|
hendrix/hendrix
|
hendrix/contrib/concurrency/messaging.py
|
MessageDispatcher.add
|
def add(self, transport, address=None):
"""
add a new recipient to be addressable by this MessageDispatcher
generate a new uuid address if one is not specified
"""
if not address:
address = str(uuid.uuid1())
if address in self.recipients:
self.recipients[address].add(transport)
else:
self.recipients[address] = RecipientManager(transport, address)
return address
|
python
|
def add(self, transport, address=None):
"""
add a new recipient to be addressable by this MessageDispatcher
generate a new uuid address if one is not specified
"""
if not address:
address = str(uuid.uuid1())
if address in self.recipients:
self.recipients[address].add(transport)
else:
self.recipients[address] = RecipientManager(transport, address)
return address
|
[
"def",
"add",
"(",
"self",
",",
"transport",
",",
"address",
"=",
"None",
")",
":",
"if",
"not",
"address",
":",
"address",
"=",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
"if",
"address",
"in",
"self",
".",
"recipients",
":",
"self",
".",
"recipients",
"[",
"address",
"]",
".",
"add",
"(",
"transport",
")",
"else",
":",
"self",
".",
"recipients",
"[",
"address",
"]",
"=",
"RecipientManager",
"(",
"transport",
",",
"address",
")",
"return",
"address"
] |
add a new recipient to be addressable by this MessageDispatcher
generate a new uuid address if one is not specified
|
[
"add",
"a",
"new",
"recipient",
"to",
"be",
"addressable",
"by",
"this",
"MessageDispatcher",
"generate",
"a",
"new",
"uuid",
"address",
"if",
"one",
"is",
"not",
"specified"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L69-L83
|
18,495
|
hendrix/hendrix
|
hendrix/contrib/concurrency/messaging.py
|
MessageDispatcher.remove
|
def remove(self, transport):
"""
removes a transport from all channels to which it belongs.
"""
recipients = copy.copy(self.recipients)
for address, recManager in recipients.items():
recManager.remove(transport)
if not len(recManager.transports):
del self.recipients[address]
|
python
|
def remove(self, transport):
"""
removes a transport from all channels to which it belongs.
"""
recipients = copy.copy(self.recipients)
for address, recManager in recipients.items():
recManager.remove(transport)
if not len(recManager.transports):
del self.recipients[address]
|
[
"def",
"remove",
"(",
"self",
",",
"transport",
")",
":",
"recipients",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"recipients",
")",
"for",
"address",
",",
"recManager",
"in",
"recipients",
".",
"items",
"(",
")",
":",
"recManager",
".",
"remove",
"(",
"transport",
")",
"if",
"not",
"len",
"(",
"recManager",
".",
"transports",
")",
":",
"del",
"self",
".",
"recipients",
"[",
"address",
"]"
] |
removes a transport from all channels to which it belongs.
|
[
"removes",
"a",
"transport",
"from",
"all",
"channels",
"to",
"which",
"it",
"belongs",
"."
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L85-L93
|
18,496
|
hendrix/hendrix
|
hendrix/contrib/concurrency/messaging.py
|
MessageDispatcher.send
|
def send(self, address, data_dict):
"""
address can either be a string or a list of strings
data_dict gets sent along as is and could contain anything
"""
if type(address) == list:
recipients = [self.recipients.get(rec) for rec in address]
else:
recipients = [self.recipients.get(address)]
if recipients:
for recipient in recipients:
if recipient:
recipient.send(json.dumps(data_dict).encode())
|
python
|
def send(self, address, data_dict):
"""
address can either be a string or a list of strings
data_dict gets sent along as is and could contain anything
"""
if type(address) == list:
recipients = [self.recipients.get(rec) for rec in address]
else:
recipients = [self.recipients.get(address)]
if recipients:
for recipient in recipients:
if recipient:
recipient.send(json.dumps(data_dict).encode())
|
[
"def",
"send",
"(",
"self",
",",
"address",
",",
"data_dict",
")",
":",
"if",
"type",
"(",
"address",
")",
"==",
"list",
":",
"recipients",
"=",
"[",
"self",
".",
"recipients",
".",
"get",
"(",
"rec",
")",
"for",
"rec",
"in",
"address",
"]",
"else",
":",
"recipients",
"=",
"[",
"self",
".",
"recipients",
".",
"get",
"(",
"address",
")",
"]",
"if",
"recipients",
":",
"for",
"recipient",
"in",
"recipients",
":",
"if",
"recipient",
":",
"recipient",
".",
"send",
"(",
"json",
".",
"dumps",
"(",
"data_dict",
")",
".",
"encode",
"(",
")",
")"
] |
address can either be a string or a list of strings
data_dict gets sent along as is and could contain anything
|
[
"address",
"can",
"either",
"be",
"a",
"string",
"or",
"a",
"list",
"of",
"strings"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L95-L110
|
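`send` accepts either a single address string or a list of addresses, looks up each RecipientManager, and serialises the payload once per delivery. A standalone sketch of the address normalisation using `isinstance` rather than a `type` comparison (the `recipients` dict below is a stand-in for the dispatcher's internal state):

```python
import json

# Stand-in for the dispatcher's internal mapping of address -> RecipientManager.
recipients = {"builds": "<RecipientManager builds>", "alerts": "<RecipientManager alerts>"}

def lookup(address):
    """Accept one address or a list of addresses, as send() does."""
    addresses = address if isinstance(address, list) else [address]
    return [recipients.get(a) for a in addresses]

payload = json.dumps({"message": "hello"}).encode()
for recipient in lookup(["builds", "missing"]):
    if recipient:  # unknown addresses resolve to None and are skipped
        print("would deliver", payload, "via", recipient)
```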
18,497
|
hendrix/hendrix
|
hendrix/contrib/concurrency/messaging.py
|
MessageDispatcher.subscribe
|
def subscribe(self, transport, data):
"""
adds a transport to a channel
"""
self.add(transport, address=data.get('hx_subscribe').encode())
self.send(
data['hx_subscribe'],
{'message': "%r is listening" % transport}
)
|
python
|
def subscribe(self, transport, data):
"""
adds a transport to a channel
"""
self.add(transport, address=data.get('hx_subscribe').encode())
self.send(
data['hx_subscribe'],
{'message': "%r is listening" % transport}
)
|
[
"def",
"subscribe",
"(",
"self",
",",
"transport",
",",
"data",
")",
":",
"self",
".",
"add",
"(",
"transport",
",",
"address",
"=",
"data",
".",
"get",
"(",
"'hx_subscribe'",
")",
".",
"encode",
"(",
")",
")",
"self",
".",
"send",
"(",
"data",
"[",
"'hx_subscribe'",
"]",
",",
"{",
"'message'",
":",
"\"%r is listening\"",
"%",
"transport",
"}",
")"
] |
adds a transport to a channel
|
[
"adds",
"a",
"transport",
"to",
"a",
"channel"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L112-L122
|
18,498
|
hendrix/hendrix
|
hendrix/options.py
|
cleanOptions
|
def cleanOptions(options):
"""
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
"""
_reload = options.pop('reload')
dev = options.pop('dev')
opts = []
store_true = [
'--nocache', '--global_cache', '--quiet', '--loud'
]
store_false = []
for key, value in options.items():
key = '--' + key
if (key in store_true and value) or (key in store_false and not value):
opts += [key, ]
elif value:
opts += [key, str(value)]
return _reload, opts
|
python
|
def cleanOptions(options):
"""
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
"""
_reload = options.pop('reload')
dev = options.pop('dev')
opts = []
store_true = [
'--nocache', '--global_cache', '--quiet', '--loud'
]
store_false = []
for key, value in options.items():
key = '--' + key
if (key in store_true and value) or (key in store_false and not value):
opts += [key, ]
elif value:
opts += [key, str(value)]
return _reload, opts
|
[
"def",
"cleanOptions",
"(",
"options",
")",
":",
"_reload",
"=",
"options",
".",
"pop",
"(",
"'reload'",
")",
"dev",
"=",
"options",
".",
"pop",
"(",
"'dev'",
")",
"opts",
"=",
"[",
"]",
"store_true",
"=",
"[",
"'--nocache'",
",",
"'--global_cache'",
",",
"'--quiet'",
",",
"'--loud'",
"]",
"store_false",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"options",
".",
"items",
"(",
")",
":",
"key",
"=",
"'--'",
"+",
"key",
"if",
"(",
"key",
"in",
"store_true",
"and",
"value",
")",
"or",
"(",
"key",
"in",
"store_false",
"and",
"not",
"value",
")",
":",
"opts",
"+=",
"[",
"key",
",",
"]",
"elif",
"value",
":",
"opts",
"+=",
"[",
"key",
",",
"str",
"(",
"value",
")",
"]",
"return",
"_reload",
",",
"opts"
] |
Takes an options dict and returns a tuple containing the daemonize boolean,
the reload boolean, and the parsed list of cleaned options as would be
expected to be passed to hx
|
[
"Takes",
"an",
"options",
"dict",
"and",
"returns",
"a",
"tuple",
"containing",
"the",
"daemonize",
"boolean",
"the",
"reload",
"boolean",
"and",
"the",
"parsed",
"list",
"of",
"cleaned",
"options",
"as",
"would",
"be",
"expected",
"to",
"be",
"passed",
"to",
"hx"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/options.py#L7-L26
|
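`cleanOptions` turns a parsed options dict back into an hx-style argument list: it pops `reload` and `dev`, emits bare flags for the store_true names, and key/value pairs for every other truthy value. A hedged usage sketch with an illustrative options dict (assuming hendrix is importable; the ordering of the returned list follows dict insertion order):

```python
from hendrix.options import cleanOptions

opts = {
    "reload": True,    # popped and returned separately
    "dev": False,      # popped and discarded
    "http_port": 8000, # truthy value -> emitted as '--http_port 8000'
    "quiet": True,     # store_true flag -> emitted bare as '--quiet'
    "nocache": False,  # falsy -> omitted entirely
}

_reload, argv = cleanOptions(opts)
print(_reload)  # True
print(argv)     # ['--http_port', '8000', '--quiet']
```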
18,499
|
hendrix/hendrix
|
hendrix/options.py
|
options
|
def options(argv=[]):
"""
A helper function that returns a dictionary of the default key-values pairs
"""
parser = HendrixOptionParser
parsed_args = parser.parse_args(argv)
return vars(parsed_args[0])
|
python
|
def options(argv=[]):
"""
A helper function that returns a dictionary of the default key-values pairs
"""
parser = HendrixOptionParser
parsed_args = parser.parse_args(argv)
return vars(parsed_args[0])
|
[
"def",
"options",
"(",
"argv",
"=",
"[",
"]",
")",
":",
"parser",
"=",
"HendrixOptionParser",
"parsed_args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"return",
"vars",
"(",
"parsed_args",
"[",
"0",
"]",
")"
] |
A helper function that returns a dictionary of the default key-values pairs
|
[
"A",
"helper",
"function",
"that",
"returns",
"a",
"dictionary",
"of",
"the",
"default",
"key",
"-",
"values",
"pairs"
] |
175af011a7e5822b772bfec0e11a46466bb8688d
|
https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/options.py#L195-L201
|
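The `options` helper above parses `argv` with the shared HendrixOptionParser and returns the resulting values as a plain dict. It uses a mutable default argument (`argv=[]`), which appears harmless here since the list is not modified, but the conventional idiom avoids the pattern altogether. A standalone sketch of that idiom (the parser below is a stand-in for HendrixOptionParser, not the real one):

```python
from argparse import ArgumentParser

parser = ArgumentParser()  # stand-in for the shared HendrixOptionParser
parser.add_argument("--http_port", type=int, default=8000)

def options(argv=None):
    """Return the parsed defaults/overrides as a plain dict."""
    argv = [] if argv is None else argv  # avoid a shared mutable default
    return vars(parser.parse_args(argv))

print(options())                       # {'http_port': 8000}
print(options(["--http_port", "80"]))  # {'http_port': 80}
```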