id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
29,600 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | DMatrix.get_uint_info | def get_uint_info(self, field):
"""Get unsigned integer property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of float information of the data
"""
length = ctypes.c_ulong()
ret = ctypes.POINTER(ctypes.c_uint)()
_check_call(_LIB.XGDMatrixGetUIntInfo(self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(ret)))
return ctypes2numpy(ret, length.value, np.uint32) | python | def get_uint_info(self, field):
"""Get unsigned integer property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of float information of the data
"""
length = ctypes.c_ulong()
ret = ctypes.POINTER(ctypes.c_uint)()
_check_call(_LIB.XGDMatrixGetUIntInfo(self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(ret)))
return ctypes2numpy(ret, length.value, np.uint32) | [
"def",
"get_uint_info",
"(",
"self",
",",
"field",
")",
":",
"length",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"ret",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_uint",
")",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGDMatrixGetUIntInfo",
... | Get unsigned integer property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of float information of the data | [
"Get",
"unsigned",
"integer",
"property",
"from",
"the",
"DMatrix",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L298-L317 |
29,601 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | DMatrix.save_binary | def save_binary(self, fname, silent=True):
"""Save DMatrix to an XGBoost buffer.
Parameters
----------
fname : string
Name of the output buffer file.
silent : bool (optional; default: True)
If set, the output is suppressed.
"""
_check_call(_LIB.XGDMatrixSaveBinary(self.handle,
c_str(fname),
int(silent))) | python | def save_binary(self, fname, silent=True):
"""Save DMatrix to an XGBoost buffer.
Parameters
----------
fname : string
Name of the output buffer file.
silent : bool (optional; default: True)
If set, the output is suppressed.
"""
_check_call(_LIB.XGDMatrixSaveBinary(self.handle,
c_str(fname),
int(silent))) | [
"def",
"save_binary",
"(",
"self",
",",
"fname",
",",
"silent",
"=",
"True",
")",
":",
"_check_call",
"(",
"_LIB",
".",
"XGDMatrixSaveBinary",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"fname",
")",
",",
"int",
"(",
"silent",
")",
")",
")"
] | Save DMatrix to an XGBoost buffer.
Parameters
----------
fname : string
Name of the output buffer file.
silent : bool (optional; default: True)
If set, the output is suppressed. | [
"Save",
"DMatrix",
"to",
"an",
"XGBoost",
"buffer",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L351-L363 |
29,602 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | DMatrix.num_row | def num_row(self):
"""Get the number of rows in the DMatrix.
Returns
-------
number of rows : int
"""
ret = ctypes.c_ulong()
_check_call(_LIB.XGDMatrixNumRow(self.handle,
ctypes.byref(ret)))
return ret.value | python | def num_row(self):
"""Get the number of rows in the DMatrix.
Returns
-------
number of rows : int
"""
ret = ctypes.c_ulong()
_check_call(_LIB.XGDMatrixNumRow(self.handle,
ctypes.byref(ret)))
return ret.value | [
"def",
"num_row",
"(",
"self",
")",
":",
"ret",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGDMatrixNumRow",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"ret",
")",
")",
")",
"return",
"ret",
".",
"... | Get the number of rows in the DMatrix.
Returns
-------
number of rows : int | [
"Get",
"the",
"number",
"of",
"rows",
"in",
"the",
"DMatrix",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L440-L450 |
29,603 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | DMatrix.slice | def slice(self, rindex):
"""Slice the DMatrix and return a new DMatrix that only contains `rindex`.
Parameters
----------
rindex : list
List of indices to be selected.
Returns
-------
res : DMatrix
A new DMatrix containing only selected indices.
"""
res = DMatrix(None, feature_names=self.feature_names)
res.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixSliceDMatrix(self.handle,
c_array(ctypes.c_int, rindex),
len(rindex),
ctypes.byref(res.handle)))
return res | python | def slice(self, rindex):
"""Slice the DMatrix and return a new DMatrix that only contains `rindex`.
Parameters
----------
rindex : list
List of indices to be selected.
Returns
-------
res : DMatrix
A new DMatrix containing only selected indices.
"""
res = DMatrix(None, feature_names=self.feature_names)
res.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixSliceDMatrix(self.handle,
c_array(ctypes.c_int, rindex),
len(rindex),
ctypes.byref(res.handle)))
return res | [
"def",
"slice",
"(",
"self",
",",
"rindex",
")",
":",
"res",
"=",
"DMatrix",
"(",
"None",
",",
"feature_names",
"=",
"self",
".",
"feature_names",
")",
"res",
".",
"handle",
"=",
"ctypes",
".",
"c_void_p",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"... | Slice the DMatrix and return a new DMatrix that only contains `rindex`.
Parameters
----------
rindex : list
List of indices to be selected.
Returns
-------
res : DMatrix
A new DMatrix containing only selected indices. | [
"Slice",
"the",
"DMatrix",
"and",
"return",
"a",
"new",
"DMatrix",
"that",
"only",
"contains",
"rindex",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L464-L483 |
29,604 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | Booster.update | def update(self, dtrain, iteration, fobj=None):
"""
Update for one iteration, with objective function calculated internally.
Parameters
----------
dtrain : DMatrix
Training data.
iteration : int
Current iteration number.
fobj : function
Customized objective function.
"""
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
if fobj is None:
_check_call(_LIB.XGBoosterUpdateOneIter(self.handle, iteration, dtrain.handle))
else:
pred = self.predict(dtrain)
grad, hess = fobj(pred, dtrain)
self.boost(dtrain, grad, hess) | python | def update(self, dtrain, iteration, fobj=None):
"""
Update for one iteration, with objective function calculated internally.
Parameters
----------
dtrain : DMatrix
Training data.
iteration : int
Current iteration number.
fobj : function
Customized objective function.
"""
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
if fobj is None:
_check_call(_LIB.XGBoosterUpdateOneIter(self.handle, iteration, dtrain.handle))
else:
pred = self.predict(dtrain)
grad, hess = fobj(pred, dtrain)
self.boost(dtrain, grad, hess) | [
"def",
"update",
"(",
"self",
",",
"dtrain",
",",
"iteration",
",",
"fobj",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"dtrain",
",",
"DMatrix",
")",
":",
"raise",
"TypeError",
"(",
"'invalid training matrix: {}'",
".",
"format",
"(",
"type",
... | Update for one iteration, with objective function calculated internally.
Parameters
----------
dtrain : DMatrix
Training data.
iteration : int
Current iteration number.
fobj : function
Customized objective function. | [
"Update",
"for",
"one",
"iteration",
"with",
"objective",
"function",
"calculated",
"internally",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L664-L686 |
29,605 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | Booster.boost | def boost(self, dtrain, grad, hess):
"""
Boost the booster for one iteration, with customized gradient statistics.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient.
"""
if len(grad) != len(hess):
raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
_check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,
c_array(ctypes.c_float, grad),
c_array(ctypes.c_float, hess),
len(grad))) | python | def boost(self, dtrain, grad, hess):
"""
Boost the booster for one iteration, with customized gradient statistics.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient.
"""
if len(grad) != len(hess):
raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
_check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,
c_array(ctypes.c_float, grad),
c_array(ctypes.c_float, hess),
len(grad))) | [
"def",
"boost",
"(",
"self",
",",
"dtrain",
",",
"grad",
",",
"hess",
")",
":",
"if",
"len",
"(",
"grad",
")",
"!=",
"len",
"(",
"hess",
")",
":",
"raise",
"ValueError",
"(",
"'grad / hess length mismatch: {} / {}'",
".",
"format",
"(",
"len",
"(",
"gr... | Boost the booster for one iteration, with customized gradient statistics.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient. | [
"Boost",
"the",
"booster",
"for",
"one",
"iteration",
"with",
"customized",
"gradient",
"statistics",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L688-L710 |
29,606 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | Booster.eval_set | def eval_set(self, evals, iteration=0, feval=None):
# pylint: disable=invalid-name
"""Evaluate a set of data.
Parameters
----------
evals : list of tuples (DMatrix, string)
List of items to be evaluated.
iteration : int
Current iteration.
feval : function
Custom evaluation function.
Returns
-------
result: str
Evaluation result string.
"""
if feval is None:
for d in evals:
if not isinstance(d[0], DMatrix):
raise TypeError('expected DMatrix, got {}'.format(type(d[0]).__name__))
if not isinstance(d[1], STRING_TYPES):
raise TypeError('expected string, got {}'.format(type(d[1]).__name__))
self._validate_features(d[0])
dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])
evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])
msg = ctypes.c_char_p()
_check_call(_LIB.XGBoosterEvalOneIter(self.handle, iteration,
dmats, evnames, len(evals),
ctypes.byref(msg)))
return msg.value
else:
res = '[%d]' % iteration
for dmat, evname in evals:
name, val = feval(self.predict(dmat), dmat)
res += '\t%s-%s:%f' % (evname, name, val)
return res | python | def eval_set(self, evals, iteration=0, feval=None):
# pylint: disable=invalid-name
"""Evaluate a set of data.
Parameters
----------
evals : list of tuples (DMatrix, string)
List of items to be evaluated.
iteration : int
Current iteration.
feval : function
Custom evaluation function.
Returns
-------
result: str
Evaluation result string.
"""
if feval is None:
for d in evals:
if not isinstance(d[0], DMatrix):
raise TypeError('expected DMatrix, got {}'.format(type(d[0]).__name__))
if not isinstance(d[1], STRING_TYPES):
raise TypeError('expected string, got {}'.format(type(d[1]).__name__))
self._validate_features(d[0])
dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])
evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])
msg = ctypes.c_char_p()
_check_call(_LIB.XGBoosterEvalOneIter(self.handle, iteration,
dmats, evnames, len(evals),
ctypes.byref(msg)))
return msg.value
else:
res = '[%d]' % iteration
for dmat, evname in evals:
name, val = feval(self.predict(dmat), dmat)
res += '\t%s-%s:%f' % (evname, name, val)
return res | [
"def",
"eval_set",
"(",
"self",
",",
"evals",
",",
"iteration",
"=",
"0",
",",
"feval",
"=",
"None",
")",
":",
"# pylint: disable=invalid-name",
"if",
"feval",
"is",
"None",
":",
"for",
"d",
"in",
"evals",
":",
"if",
"not",
"isinstance",
"(",
"d",
"[",... | Evaluate a set of data.
Parameters
----------
evals : list of tuples (DMatrix, string)
List of items to be evaluated.
iteration : int
Current iteration.
feval : function
Custom evaluation function.
Returns
-------
result: str
Evaluation result string. | [
"Evaluate",
"a",
"set",
"of",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L712-L750 |
29,607 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | Booster.save_raw | def save_raw(self):
"""
Save the model to a in memory buffer represetation
Returns
-------
a in memory buffer represetation of the model
"""
length = ctypes.c_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(_LIB.XGBoosterGetModelRaw(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
return ctypes2buffer(cptr, length.value) | python | def save_raw(self):
"""
Save the model to a in memory buffer represetation
Returns
-------
a in memory buffer represetation of the model
"""
length = ctypes.c_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(_LIB.XGBoosterGetModelRaw(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
return ctypes2buffer(cptr, length.value) | [
"def",
"save_raw",
"(",
"self",
")",
":",
"length",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"cptr",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char",
")",
"(",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterGetModelRaw",
"(",
"self",
".",
... | Save the model to a in memory buffer represetation
Returns
-------
a in memory buffer represetation of the model | [
"Save",
"the",
"model",
"to",
"a",
"in",
"memory",
"buffer",
"represetation"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L840-L853 |
29,608 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | Booster.dump_model | def dump_model(self, fout, fmap='', with_stats=False):
"""
Dump model into a text file.
Parameters
----------
foout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool (optional)
Controls whether the split statistics are output.
"""
if isinstance(fout, STRING_TYPES):
fout = open(fout, 'w')
need_close = True
else:
need_close = False
ret = self.get_dump(fmap, with_stats)
for i in range(len(ret)):
fout.write('booster[{}]:\n'.format(i))
fout.write(ret[i])
if need_close:
fout.close() | python | def dump_model(self, fout, fmap='', with_stats=False):
"""
Dump model into a text file.
Parameters
----------
foout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool (optional)
Controls whether the split statistics are output.
"""
if isinstance(fout, STRING_TYPES):
fout = open(fout, 'w')
need_close = True
else:
need_close = False
ret = self.get_dump(fmap, with_stats)
for i in range(len(ret)):
fout.write('booster[{}]:\n'.format(i))
fout.write(ret[i])
if need_close:
fout.close() | [
"def",
"dump_model",
"(",
"self",
",",
"fout",
",",
"fmap",
"=",
"''",
",",
"with_stats",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"fout",
",",
"STRING_TYPES",
")",
":",
"fout",
"=",
"open",
"(",
"fout",
",",
"'w'",
")",
"need_close",
"=",
... | Dump model into a text file.
Parameters
----------
foout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool (optional)
Controls whether the split statistics are output. | [
"Dump",
"model",
"into",
"a",
"text",
"file",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L875-L898 |
29,609 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | Booster.get_dump | def get_dump(self, fmap='', with_stats=False):
"""
Returns the dump the model as a list of strings.
"""
length = ctypes.c_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
if self.feature_names is not None and fmap == '':
flen = int(len(self.feature_names))
fname = from_pystr_to_cstr(self.feature_names)
if self.feature_types is None:
# use quantitative as default
# {'q': quantitative, 'i': indicator}
ftype = from_pystr_to_cstr(['q'] * flen)
else:
ftype = from_pystr_to_cstr(self.feature_types)
_check_call(_LIB.XGBoosterDumpModelWithFeatures(self.handle,
flen,
fname,
ftype,
int(with_stats),
ctypes.byref(length),
ctypes.byref(sarr)))
else:
if fmap != '' and not os.path.exists(fmap):
raise ValueError("No such file: {0}".format(fmap))
_check_call(_LIB.XGBoosterDumpModel(self.handle,
c_str(fmap),
int(with_stats),
ctypes.byref(length),
ctypes.byref(sarr)))
res = from_cstr_to_pystr(sarr, length)
return res | python | def get_dump(self, fmap='', with_stats=False):
"""
Returns the dump the model as a list of strings.
"""
length = ctypes.c_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
if self.feature_names is not None and fmap == '':
flen = int(len(self.feature_names))
fname = from_pystr_to_cstr(self.feature_names)
if self.feature_types is None:
# use quantitative as default
# {'q': quantitative, 'i': indicator}
ftype = from_pystr_to_cstr(['q'] * flen)
else:
ftype = from_pystr_to_cstr(self.feature_types)
_check_call(_LIB.XGBoosterDumpModelWithFeatures(self.handle,
flen,
fname,
ftype,
int(with_stats),
ctypes.byref(length),
ctypes.byref(sarr)))
else:
if fmap != '' and not os.path.exists(fmap):
raise ValueError("No such file: {0}".format(fmap))
_check_call(_LIB.XGBoosterDumpModel(self.handle,
c_str(fmap),
int(with_stats),
ctypes.byref(length),
ctypes.byref(sarr)))
res = from_cstr_to_pystr(sarr, length)
return res | [
"def",
"get_dump",
"(",
"self",
",",
"fmap",
"=",
"''",
",",
"with_stats",
"=",
"False",
")",
":",
"length",
"=",
"ctypes",
".",
"c_ulong",
"(",
")",
"sarr",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"if",
"self"... | Returns the dump the model as a list of strings. | [
"Returns",
"the",
"dump",
"the",
"model",
"as",
"a",
"list",
"of",
"strings",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L900-L934 |
29,610 | apple/turicreate | src/external/xgboost/python-package/xgboost/core.py | Booster.get_fscore | def get_fscore(self, fmap=''):
"""Get feature importance of each feature.
Parameters
----------
fmap: str (optional)
The name of feature map file
"""
trees = self.get_dump(fmap)
fmap = {}
for tree in trees:
for line in tree.split('\n'):
arr = line.split('[')
if len(arr) == 1:
continue
fid = arr[1].split(']')[0]
fid = fid.split('<')[0]
if fid not in fmap:
fmap[fid] = 1
else:
fmap[fid] += 1
return fmap | python | def get_fscore(self, fmap=''):
"""Get feature importance of each feature.
Parameters
----------
fmap: str (optional)
The name of feature map file
"""
trees = self.get_dump(fmap)
fmap = {}
for tree in trees:
for line in tree.split('\n'):
arr = line.split('[')
if len(arr) == 1:
continue
fid = arr[1].split(']')[0]
fid = fid.split('<')[0]
if fid not in fmap:
fmap[fid] = 1
else:
fmap[fid] += 1
return fmap | [
"def",
"get_fscore",
"(",
"self",
",",
"fmap",
"=",
"''",
")",
":",
"trees",
"=",
"self",
".",
"get_dump",
"(",
"fmap",
")",
"fmap",
"=",
"{",
"}",
"for",
"tree",
"in",
"trees",
":",
"for",
"line",
"in",
"tree",
".",
"split",
"(",
"'\\n'",
")",
... | Get feature importance of each feature.
Parameters
----------
fmap: str (optional)
The name of feature map file | [
"Get",
"feature",
"importance",
"of",
"each",
"feature",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/core.py#L936-L957 |
29,611 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/regex.py | transform | def transform (list, pattern, indices = [1]):
""" Matches all elements of 'list' agains the 'pattern'
and returns a list of the elements indicated by indices of
all successfull matches. If 'indices' is omitted returns
a list of first paranthethised groups of all successfull
matches.
"""
result = []
for e in list:
m = re.match (pattern, e)
if m:
for i in indices:
result.append (m.group (i))
return result | python | def transform (list, pattern, indices = [1]):
""" Matches all elements of 'list' agains the 'pattern'
and returns a list of the elements indicated by indices of
all successfull matches. If 'indices' is omitted returns
a list of first paranthethised groups of all successfull
matches.
"""
result = []
for e in list:
m = re.match (pattern, e)
if m:
for i in indices:
result.append (m.group (i))
return result | [
"def",
"transform",
"(",
"list",
",",
"pattern",
",",
"indices",
"=",
"[",
"1",
"]",
")",
":",
"result",
"=",
"[",
"]",
"for",
"e",
"in",
"list",
":",
"m",
"=",
"re",
".",
"match",
"(",
"pattern",
",",
"e",
")",
"if",
"m",
":",
"for",
"i",
... | Matches all elements of 'list' agains the 'pattern'
and returns a list of the elements indicated by indices of
all successfull matches. If 'indices' is omitted returns
a list of first paranthethised groups of all successfull
matches. | [
"Matches",
"all",
"elements",
"of",
"list",
"agains",
"the",
"pattern",
"and",
"returns",
"a",
"list",
"of",
"the",
"elements",
"indicated",
"by",
"indices",
"of",
"all",
"successfull",
"matches",
".",
"If",
"indices",
"is",
"omitted",
"returns",
"a",
"list"... | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/regex.py#L11-L27 |
29,612 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/regex.py | replace | def replace(s, pattern, replacement):
"""Replaces occurrences of a match string in a given
string and returns the new string. The match string
can be a regex expression.
Args:
s (str): the string to modify
pattern (str): the search expression
replacement (str): the string to replace each match with
"""
# the replacement string may contain invalid backreferences (like \1 or \g)
# which will cause python's regex to blow up. Since this should emulate
# the jam version exactly and the jam version didn't support
# backreferences, this version shouldn't either. re.sub
# allows replacement to be a callable; this is being used
# to simply return the replacement string and avoid the hassle
# of worrying about backreferences within the string.
def _replacement(matchobj):
return replacement
return re.sub(pattern, _replacement, s) | python | def replace(s, pattern, replacement):
"""Replaces occurrences of a match string in a given
string and returns the new string. The match string
can be a regex expression.
Args:
s (str): the string to modify
pattern (str): the search expression
replacement (str): the string to replace each match with
"""
# the replacement string may contain invalid backreferences (like \1 or \g)
# which will cause python's regex to blow up. Since this should emulate
# the jam version exactly and the jam version didn't support
# backreferences, this version shouldn't either. re.sub
# allows replacement to be a callable; this is being used
# to simply return the replacement string and avoid the hassle
# of worrying about backreferences within the string.
def _replacement(matchobj):
return replacement
return re.sub(pattern, _replacement, s) | [
"def",
"replace",
"(",
"s",
",",
"pattern",
",",
"replacement",
")",
":",
"# the replacement string may contain invalid backreferences (like \\1 or \\g)",
"# which will cause python's regex to blow up. Since this should emulate",
"# the jam version exactly and the jam version didn't support"... | Replaces occurrences of a match string in a given
string and returns the new string. The match string
can be a regex expression.
Args:
s (str): the string to modify
pattern (str): the search expression
replacement (str): the string to replace each match with | [
"Replaces",
"occurrences",
"of",
"a",
"match",
"string",
"in",
"a",
"given",
"string",
"and",
"returns",
"the",
"new",
"string",
".",
"The",
"match",
"string",
"can",
"be",
"a",
"regex",
"expression",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/regex.py#L31-L50 |
29,613 | apple/turicreate | deps/src/boost_1_68_0/tools/build/src/util/regex.py | replace_list | def replace_list(items, match, replacement):
"""Replaces occurrences of a match string in a given list of strings and returns
a list of new strings. The match string can be a regex expression.
Args:
items (list): the list of strings to modify.
match (str): the search expression.
replacement (str): the string to replace with.
"""
return [replace(item, match, replacement) for item in items] | python | def replace_list(items, match, replacement):
"""Replaces occurrences of a match string in a given list of strings and returns
a list of new strings. The match string can be a regex expression.
Args:
items (list): the list of strings to modify.
match (str): the search expression.
replacement (str): the string to replace with.
"""
return [replace(item, match, replacement) for item in items] | [
"def",
"replace_list",
"(",
"items",
",",
"match",
",",
"replacement",
")",
":",
"return",
"[",
"replace",
"(",
"item",
",",
"match",
",",
"replacement",
")",
"for",
"item",
"in",
"items",
"]"
] | Replaces occurrences of a match string in a given list of strings and returns
a list of new strings. The match string can be a regex expression.
Args:
items (list): the list of strings to modify.
match (str): the search expression.
replacement (str): the string to replace with. | [
"Replaces",
"occurrences",
"of",
"a",
"match",
"string",
"in",
"a",
"given",
"list",
"of",
"strings",
"and",
"returns",
"a",
"list",
"of",
"new",
"strings",
".",
"The",
"match",
"string",
"can",
"be",
"a",
"regex",
"expression",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/util/regex.py#L54-L63 |
29,614 | apple/turicreate | src/unity/python/turicreate/toolkits/topic_model/topic_model.py | create | def create(dataset,
num_topics=10,
initial_topics=None,
alpha=None,
beta=.1,
num_iterations=10,
num_burnin=5,
associations=None,
verbose=False,
print_interval=10,
validation_set=None,
method='auto'):
"""
Create a topic model from the given data set. A topic model assumes each
document is a mixture of a set of topics, where for each topic some words
are more likely than others. One statistical approach to do this is called a
"topic model". This method learns a topic model for the given document
collection.
Parameters
----------
dataset : SArray of type dict or SFrame with a single column of type dict
A bag of words representation of a document corpus.
Each element is a dictionary representing a single document, where
the keys are words and the values are the number of times that word
occurs in that document.
num_topics : int, optional
The number of topics to learn.
initial_topics : SFrame, optional
An SFrame with a column of unique words representing the vocabulary
and a column of dense vectors representing
probability of that word given each topic. When provided,
these values are used to initialize the algorithm.
alpha : float, optional
Hyperparameter that controls the diversity of topics in a document.
Smaller values encourage fewer topics per document.
Provided value must be positive. Default value is 50/num_topics.
beta : float, optional
Hyperparameter that controls the diversity of words in a topic.
Smaller values encourage fewer words per topic. Provided value
must be positive.
num_iterations : int, optional
The number of iterations to perform.
num_burnin : int, optional
The number of iterations to perform when inferring the topics for
documents at prediction time.
verbose : bool, optional
When True, print most probable words for each topic while printing
progress.
print_interval : int, optional
The number of iterations to wait between progress reports.
associations : SFrame, optional
An SFrame with two columns named "word" and "topic" containing words
and the topic id that the word should be associated with. These words
are not considered during learning.
validation_set : SArray of type dict or SFrame with a single column
A bag of words representation of a document corpus, similar to the
format required for `dataset`. This will be used to monitor model
performance during training. Each document in the provided validation
set is randomly split: the first portion is used estimate which topic
each document belongs to, and the second portion is used to estimate
the model's performance at predicting the unseen words in the test data.
method : {'cgs', 'alias'}, optional
The algorithm used for learning the model.
- *cgs:* Collapsed Gibbs sampling
- *alias:* AliasLDA method.
Returns
-------
out : TopicModel
A fitted topic model. This can be used with
:py:func:`~TopicModel.get_topics()` and
:py:func:`~TopicModel.predict()`. While fitting is in progress, several
metrics are shown, including:
+------------------+---------------------------------------------------+
| Field | Description |
+==================+===================================================+
| Elapsed Time | The number of elapsed seconds. |
+------------------+---------------------------------------------------+
| Tokens/second | The number of unique words processed per second |
+------------------+---------------------------------------------------+
| Est. Perplexity | An estimate of the model's ability to model the |
| | training data. See the documentation on evaluate. |
+------------------+---------------------------------------------------+
See Also
--------
TopicModel, TopicModel.get_topics, TopicModel.predict,
turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate
References
----------
- `Wikipedia - Latent Dirichlet allocation
<http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_
- Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
KDD 2014.
Examples
--------
The following example includes an SArray of documents, where
each element represents a document in "bag of words" representation
-- a dictionary with word keys and whose values are the number of times
that word occurred in the document:
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')
Once in this form, it is straightforward to learn a topic model.
>>> m = turicreate.topic_model.create(docs)
It is also easy to create a new topic model from an old one -- whether
it was created using Turi Create or another package.
>>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])
To manually fix several words to always be assigned to a topic, use
the `associations` argument. The following will ensure that topic 0
has the most probability for each of the provided words:
>>> from turicreate import SFrame
>>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
'topic': [0, 0, 0]})
>>> m = turicreate.topic_model.create(docs,
associations=associations)
More advanced usage allows you to control aspects of the model and the
learning method.
>>> import turicreate as tc
>>> m = tc.topic_model.create(docs,
num_topics=20, # number of topics
num_iterations=10, # algorithm parameters
alpha=.01, beta=.1) # hyperparameters
To evaluate the model's ability to generalize, we can create a train/test
split where a portion of the words in each document are held out from
training.
>>> train, test = tc.text_analytics.random_split(.8)
>>> m = tc.topic_model.create(train)
>>> results = m.evaluate(test)
>>> print results['perplexity']
"""
dataset = _check_input(dataset)
_check_categorical_option_type("method", method, ['auto', 'cgs', 'alias'])
if method == 'cgs' or method == 'auto':
model_name = 'cgs_topic_model'
else:
model_name = 'alias_topic_model'
# If associations are provided, check they are in the proper format
if associations is None:
associations = _turicreate.SFrame({'word': [], 'topic': []})
if isinstance(associations, _turicreate.SFrame) and \
associations.num_rows() > 0:
assert set(associations.column_names()) == set(['word', 'topic']), \
"Provided associations must be an SFrame containing a word column\
and a topic column."
assert associations['word'].dtype == str, \
"Words must be strings."
assert associations['topic'].dtype == int, \
"Topic ids must be of int type."
if alpha is None:
alpha = float(50) / num_topics
if validation_set is not None:
_check_input(validation_set) # Must be a single column
if isinstance(validation_set, _turicreate.SFrame):
column_name = validation_set.column_names()[0]
validation_set = validation_set[column_name]
(validation_train, validation_test) = _random_split(validation_set)
else:
validation_train = _SArray()
validation_test = _SArray()
opts = {'model_name': model_name,
'data': dataset,
'num_topics': num_topics,
'num_iterations': num_iterations,
'print_interval': print_interval,
'alpha': alpha,
'beta': beta,
'num_burnin': num_burnin,
'associations': associations}
# Initialize the model with basic parameters
response = _turicreate.extensions._text.topicmodel_init(opts)
m = TopicModel(response['model'])
# If initial_topics provided, load it into the model
if isinstance(initial_topics, _turicreate.SFrame):
assert set(['vocabulary', 'topic_probabilities']) == \
set(initial_topics.column_names()), \
"The provided initial_topics does not have the proper format, \
e.g. wrong column names."
observed_topics = initial_topics['topic_probabilities'].apply(lambda x: len(x))
assert all(observed_topics == num_topics), \
"Provided num_topics value does not match the number of provided initial_topics."
# Rough estimate of total number of words
weight = len(dataset) * 1000
opts = {'model': m.__proxy__,
'topics': initial_topics['topic_probabilities'],
'vocabulary': initial_topics['vocabulary'],
'weight': weight}
response = _turicreate.extensions._text.topicmodel_set_topics(opts)
m = TopicModel(response['model'])
# Train the model on the given data set and retrieve predictions
opts = {'model': m.__proxy__,
'data': dataset,
'verbose': verbose,
'validation_train': validation_train,
'validation_test': validation_test}
response = _turicreate.extensions._text.topicmodel_train(opts)
m = TopicModel(response['model'])
return m | python | def create(dataset,
num_topics=10,
initial_topics=None,
alpha=None,
beta=.1,
num_iterations=10,
num_burnin=5,
associations=None,
verbose=False,
print_interval=10,
validation_set=None,
method='auto'):
"""
Create a topic model from the given data set. A topic model assumes each
document is a mixture of a set of topics, where for each topic some words
are more likely than others. One statistical approach to do this is called a
"topic model". This method learns a topic model for the given document
collection.
Parameters
----------
dataset : SArray of type dict or SFrame with a single column of type dict
A bag of words representation of a document corpus.
Each element is a dictionary representing a single document, where
the keys are words and the values are the number of times that word
occurs in that document.
num_topics : int, optional
The number of topics to learn.
initial_topics : SFrame, optional
An SFrame with a column of unique words representing the vocabulary
and a column of dense vectors representing
probability of that word given each topic. When provided,
these values are used to initialize the algorithm.
alpha : float, optional
Hyperparameter that controls the diversity of topics in a document.
Smaller values encourage fewer topics per document.
Provided value must be positive. Default value is 50/num_topics.
beta : float, optional
Hyperparameter that controls the diversity of words in a topic.
Smaller values encourage fewer words per topic. Provided value
must be positive.
num_iterations : int, optional
The number of iterations to perform.
num_burnin : int, optional
The number of iterations to perform when inferring the topics for
documents at prediction time.
verbose : bool, optional
When True, print most probable words for each topic while printing
progress.
print_interval : int, optional
The number of iterations to wait between progress reports.
associations : SFrame, optional
An SFrame with two columns named "word" and "topic" containing words
and the topic id that the word should be associated with. These words
are not considered during learning.
validation_set : SArray of type dict or SFrame with a single column
A bag of words representation of a document corpus, similar to the
format required for `dataset`. This will be used to monitor model
performance during training. Each document in the provided validation
set is randomly split: the first portion is used estimate which topic
each document belongs to, and the second portion is used to estimate
the model's performance at predicting the unseen words in the test data.
method : {'cgs', 'alias'}, optional
The algorithm used for learning the model.
- *cgs:* Collapsed Gibbs sampling
- *alias:* AliasLDA method.
Returns
-------
out : TopicModel
A fitted topic model. This can be used with
:py:func:`~TopicModel.get_topics()` and
:py:func:`~TopicModel.predict()`. While fitting is in progress, several
metrics are shown, including:
+------------------+---------------------------------------------------+
| Field | Description |
+==================+===================================================+
| Elapsed Time | The number of elapsed seconds. |
+------------------+---------------------------------------------------+
| Tokens/second | The number of unique words processed per second |
+------------------+---------------------------------------------------+
| Est. Perplexity | An estimate of the model's ability to model the |
| | training data. See the documentation on evaluate. |
+------------------+---------------------------------------------------+
See Also
--------
TopicModel, TopicModel.get_topics, TopicModel.predict,
turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate
References
----------
- `Wikipedia - Latent Dirichlet allocation
<http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_
- Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
KDD 2014.
Examples
--------
The following example includes an SArray of documents, where
each element represents a document in "bag of words" representation
-- a dictionary with word keys and whose values are the number of times
that word occurred in the document:
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')
Once in this form, it is straightforward to learn a topic model.
>>> m = turicreate.topic_model.create(docs)
It is also easy to create a new topic model from an old one -- whether
it was created using Turi Create or another package.
>>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])
To manually fix several words to always be assigned to a topic, use
the `associations` argument. The following will ensure that topic 0
has the most probability for each of the provided words:
>>> from turicreate import SFrame
>>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
'topic': [0, 0, 0]})
>>> m = turicreate.topic_model.create(docs,
associations=associations)
More advanced usage allows you to control aspects of the model and the
learning method.
>>> import turicreate as tc
>>> m = tc.topic_model.create(docs,
num_topics=20, # number of topics
num_iterations=10, # algorithm parameters
alpha=.01, beta=.1) # hyperparameters
To evaluate the model's ability to generalize, we can create a train/test
split where a portion of the words in each document are held out from
training.
>>> train, test = tc.text_analytics.random_split(.8)
>>> m = tc.topic_model.create(train)
>>> results = m.evaluate(test)
>>> print results['perplexity']
"""
dataset = _check_input(dataset)
_check_categorical_option_type("method", method, ['auto', 'cgs', 'alias'])
if method == 'cgs' or method == 'auto':
model_name = 'cgs_topic_model'
else:
model_name = 'alias_topic_model'
# If associations are provided, check they are in the proper format
if associations is None:
associations = _turicreate.SFrame({'word': [], 'topic': []})
if isinstance(associations, _turicreate.SFrame) and \
associations.num_rows() > 0:
assert set(associations.column_names()) == set(['word', 'topic']), \
"Provided associations must be an SFrame containing a word column\
and a topic column."
assert associations['word'].dtype == str, \
"Words must be strings."
assert associations['topic'].dtype == int, \
"Topic ids must be of int type."
if alpha is None:
alpha = float(50) / num_topics
if validation_set is not None:
_check_input(validation_set) # Must be a single column
if isinstance(validation_set, _turicreate.SFrame):
column_name = validation_set.column_names()[0]
validation_set = validation_set[column_name]
(validation_train, validation_test) = _random_split(validation_set)
else:
validation_train = _SArray()
validation_test = _SArray()
opts = {'model_name': model_name,
'data': dataset,
'num_topics': num_topics,
'num_iterations': num_iterations,
'print_interval': print_interval,
'alpha': alpha,
'beta': beta,
'num_burnin': num_burnin,
'associations': associations}
# Initialize the model with basic parameters
response = _turicreate.extensions._text.topicmodel_init(opts)
m = TopicModel(response['model'])
# If initial_topics provided, load it into the model
if isinstance(initial_topics, _turicreate.SFrame):
assert set(['vocabulary', 'topic_probabilities']) == \
set(initial_topics.column_names()), \
"The provided initial_topics does not have the proper format, \
e.g. wrong column names."
observed_topics = initial_topics['topic_probabilities'].apply(lambda x: len(x))
assert all(observed_topics == num_topics), \
"Provided num_topics value does not match the number of provided initial_topics."
# Rough estimate of total number of words
weight = len(dataset) * 1000
opts = {'model': m.__proxy__,
'topics': initial_topics['topic_probabilities'],
'vocabulary': initial_topics['vocabulary'],
'weight': weight}
response = _turicreate.extensions._text.topicmodel_set_topics(opts)
m = TopicModel(response['model'])
# Train the model on the given data set and retrieve predictions
opts = {'model': m.__proxy__,
'data': dataset,
'verbose': verbose,
'validation_train': validation_train,
'validation_test': validation_test}
response = _turicreate.extensions._text.topicmodel_train(opts)
m = TopicModel(response['model'])
return m | [
"def",
"create",
"(",
"dataset",
",",
"num_topics",
"=",
"10",
",",
"initial_topics",
"=",
"None",
",",
"alpha",
"=",
"None",
",",
"beta",
"=",
".1",
",",
"num_iterations",
"=",
"10",
",",
"num_burnin",
"=",
"5",
",",
"associations",
"=",
"None",
",",
... | Create a topic model from the given data set. A topic model assumes each
document is a mixture of a set of topics, where for each topic some words
are more likely than others. One statistical approach to do this is called a
"topic model". This method learns a topic model for the given document
collection.
Parameters
----------
dataset : SArray of type dict or SFrame with a single column of type dict
A bag of words representation of a document corpus.
Each element is a dictionary representing a single document, where
the keys are words and the values are the number of times that word
occurs in that document.
num_topics : int, optional
The number of topics to learn.
initial_topics : SFrame, optional
An SFrame with a column of unique words representing the vocabulary
and a column of dense vectors representing
probability of that word given each topic. When provided,
these values are used to initialize the algorithm.
alpha : float, optional
Hyperparameter that controls the diversity of topics in a document.
Smaller values encourage fewer topics per document.
Provided value must be positive. Default value is 50/num_topics.
beta : float, optional
Hyperparameter that controls the diversity of words in a topic.
Smaller values encourage fewer words per topic. Provided value
must be positive.
num_iterations : int, optional
The number of iterations to perform.
num_burnin : int, optional
The number of iterations to perform when inferring the topics for
documents at prediction time.
verbose : bool, optional
When True, print most probable words for each topic while printing
progress.
print_interval : int, optional
The number of iterations to wait between progress reports.
associations : SFrame, optional
An SFrame with two columns named "word" and "topic" containing words
and the topic id that the word should be associated with. These words
are not considered during learning.
validation_set : SArray of type dict or SFrame with a single column
A bag of words representation of a document corpus, similar to the
format required for `dataset`. This will be used to monitor model
performance during training. Each document in the provided validation
set is randomly split: the first portion is used estimate which topic
each document belongs to, and the second portion is used to estimate
the model's performance at predicting the unseen words in the test data.
method : {'cgs', 'alias'}, optional
The algorithm used for learning the model.
- *cgs:* Collapsed Gibbs sampling
- *alias:* AliasLDA method.
Returns
-------
out : TopicModel
A fitted topic model. This can be used with
:py:func:`~TopicModel.get_topics()` and
:py:func:`~TopicModel.predict()`. While fitting is in progress, several
metrics are shown, including:
+------------------+---------------------------------------------------+
| Field | Description |
+==================+===================================================+
| Elapsed Time | The number of elapsed seconds. |
+------------------+---------------------------------------------------+
| Tokens/second | The number of unique words processed per second |
+------------------+---------------------------------------------------+
| Est. Perplexity | An estimate of the model's ability to model the |
| | training data. See the documentation on evaluate. |
+------------------+---------------------------------------------------+
See Also
--------
TopicModel, TopicModel.get_topics, TopicModel.predict,
turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate
References
----------
- `Wikipedia - Latent Dirichlet allocation
<http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_
- Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
KDD 2014.
Examples
--------
The following example includes an SArray of documents, where
each element represents a document in "bag of words" representation
-- a dictionary with word keys and whose values are the number of times
that word occurred in the document:
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')
Once in this form, it is straightforward to learn a topic model.
>>> m = turicreate.topic_model.create(docs)
It is also easy to create a new topic model from an old one -- whether
it was created using Turi Create or another package.
>>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])
To manually fix several words to always be assigned to a topic, use
the `associations` argument. The following will ensure that topic 0
has the most probability for each of the provided words:
>>> from turicreate import SFrame
>>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
'topic': [0, 0, 0]})
>>> m = turicreate.topic_model.create(docs,
associations=associations)
More advanced usage allows you to control aspects of the model and the
learning method.
>>> import turicreate as tc
>>> m = tc.topic_model.create(docs,
num_topics=20, # number of topics
num_iterations=10, # algorithm parameters
alpha=.01, beta=.1) # hyperparameters
To evaluate the model's ability to generalize, we can create a train/test
split where a portion of the words in each document are held out from
training.
>>> train, test = tc.text_analytics.random_split(.8)
>>> m = tc.topic_model.create(train)
>>> results = m.evaluate(test)
>>> print results['perplexity'] | [
"Create",
"a",
"topic",
"model",
"from",
"the",
"given",
"data",
"set",
".",
"A",
"topic",
"model",
"assumes",
"each",
"document",
"is",
"a",
"mixture",
"of",
"a",
"set",
"of",
"topics",
"where",
"for",
"each",
"topic",
"some",
"words",
"are",
"more",
... | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L35-L271 |
29,615 | apple/turicreate | src/unity/python/turicreate/toolkits/topic_model/topic_model.py | perplexity | def perplexity(test_data, predictions, topics, vocabulary):
"""
Compute the perplexity of a set of test documents given a set
of predicted topics.
Let theta be the matrix of document-topic probabilities, where
theta_ik = p(topic k | document i). Let Phi be the matrix of term-topic
probabilities, where phi_jk = p(word j | topic k).
Then for each word in each document, we compute for a given word w
and document d
.. math::
p(word | \theta[doc_id,:], \phi[word_id,:]) =
\sum_k \theta[doc_id, k] * \phi[word_id, k]
We compute loglikelihood to be:
.. math::
l(D) = \sum_{i \in D} \sum_{j in D_i} count_{i,j} * log Pr(word_{i,j} | \theta, \phi)
and perplexity to be
.. math::
\exp \{ - l(D) / \sum_i \sum_j count_{i,j} \}
Parameters
----------
test_data : SArray of type dict or SFrame with a single column of type dict
Documents in bag-of-words format.
predictions : SArray
An SArray of vector type, where each vector contains estimates of the
probability that this document belongs to each of the topics.
This must have the same size as test_data; otherwise an exception
occurs. This can be the output of
:py:func:`~turicreate.topic_model.TopicModel.predict`, for example.
topics : SFrame
An SFrame containing two columns: 'vocabulary' and 'topic_probabilities'.
The value returned by m['topics'] is a valid input for this argument,
where m is a trained :py:class:`~turicreate.topic_model.TopicModel`.
vocabulary : SArray
An SArray of words to use. All words in test_data that are not in this
vocabulary will be ignored.
Notes
-----
For more details, see equations 13-16 of [PattersonTeh2013].
References
----------
.. [PERP] `Wikipedia - perplexity <http://en.wikipedia.org/wiki/Perplexity>`_
.. [PattersonTeh2013] Patterson, Teh. `"Stochastic Gradient Riemannian
Langevin Dynamics on the Probability Simplex"
<http://www.stats.ox.ac.uk/~teh/research/compstats/PatTeh2013a.pdf>`_
NIPS, 2013.
Examples
--------
>>> from turicreate import topic_model
>>> train_data, test_data = turicreate.text_analytics.random_split(docs)
>>> m = topic_model.create(train_data)
>>> pred = m.predict(train_data)
>>> topics = m['topics']
>>> p = topic_model.perplexity(test_data, pred,
topics['topic_probabilities'],
topics['vocabulary'])
>>> p
1720.7 # lower values are better
"""
test_data = _check_input(test_data)
assert isinstance(predictions, _SArray), \
"Predictions must be an SArray of vector type."
assert predictions.dtype == _array.array, \
"Predictions must be probabilities. Try using m.predict() with " + \
"output_type='probability'."
opts = {'test_data': test_data,
'predictions': predictions,
'topics': topics,
'vocabulary': vocabulary}
response = _turicreate.extensions._text.topicmodel_get_perplexity(opts)
return response['perplexity'] | python | def perplexity(test_data, predictions, topics, vocabulary):
"""
Compute the perplexity of a set of test documents given a set
of predicted topics.
Let theta be the matrix of document-topic probabilities, where
theta_ik = p(topic k | document i). Let Phi be the matrix of term-topic
probabilities, where phi_jk = p(word j | topic k).
Then for each word in each document, we compute for a given word w
and document d
.. math::
p(word | \theta[doc_id,:], \phi[word_id,:]) =
\sum_k \theta[doc_id, k] * \phi[word_id, k]
We compute loglikelihood to be:
.. math::
l(D) = \sum_{i \in D} \sum_{j in D_i} count_{i,j} * log Pr(word_{i,j} | \theta, \phi)
and perplexity to be
.. math::
\exp \{ - l(D) / \sum_i \sum_j count_{i,j} \}
Parameters
----------
test_data : SArray of type dict or SFrame with a single column of type dict
Documents in bag-of-words format.
predictions : SArray
An SArray of vector type, where each vector contains estimates of the
probability that this document belongs to each of the topics.
This must have the same size as test_data; otherwise an exception
occurs. This can be the output of
:py:func:`~turicreate.topic_model.TopicModel.predict`, for example.
topics : SFrame
An SFrame containing two columns: 'vocabulary' and 'topic_probabilities'.
The value returned by m['topics'] is a valid input for this argument,
where m is a trained :py:class:`~turicreate.topic_model.TopicModel`.
vocabulary : SArray
An SArray of words to use. All words in test_data that are not in this
vocabulary will be ignored.
Notes
-----
For more details, see equations 13-16 of [PattersonTeh2013].
References
----------
.. [PERP] `Wikipedia - perplexity <http://en.wikipedia.org/wiki/Perplexity>`_
.. [PattersonTeh2013] Patterson, Teh. `"Stochastic Gradient Riemannian
Langevin Dynamics on the Probability Simplex"
<http://www.stats.ox.ac.uk/~teh/research/compstats/PatTeh2013a.pdf>`_
NIPS, 2013.
Examples
--------
>>> from turicreate import topic_model
>>> train_data, test_data = turicreate.text_analytics.random_split(docs)
>>> m = topic_model.create(train_data)
>>> pred = m.predict(train_data)
>>> topics = m['topics']
>>> p = topic_model.perplexity(test_data, pred,
topics['topic_probabilities'],
topics['vocabulary'])
>>> p
1720.7 # lower values are better
"""
test_data = _check_input(test_data)
assert isinstance(predictions, _SArray), \
"Predictions must be an SArray of vector type."
assert predictions.dtype == _array.array, \
"Predictions must be probabilities. Try using m.predict() with " + \
"output_type='probability'."
opts = {'test_data': test_data,
'predictions': predictions,
'topics': topics,
'vocabulary': vocabulary}
response = _turicreate.extensions._text.topicmodel_get_perplexity(opts)
return response['perplexity'] | [
"def",
"perplexity",
"(",
"test_data",
",",
"predictions",
",",
"topics",
",",
"vocabulary",
")",
":",
"test_data",
"=",
"_check_input",
"(",
"test_data",
")",
"assert",
"isinstance",
"(",
"predictions",
",",
"_SArray",
")",
",",
"\"Predictions must be an SArray o... | Compute the perplexity of a set of test documents given a set
of predicted topics.
Let theta be the matrix of document-topic probabilities, where
theta_ik = p(topic k | document i). Let Phi be the matrix of term-topic
probabilities, where phi_jk = p(word j | topic k).
Then for each word in each document, we compute for a given word w
and document d
.. math::
p(word | \theta[doc_id,:], \phi[word_id,:]) =
\sum_k \theta[doc_id, k] * \phi[word_id, k]
We compute loglikelihood to be:
.. math::
l(D) = \sum_{i \in D} \sum_{j in D_i} count_{i,j} * log Pr(word_{i,j} | \theta, \phi)
and perplexity to be
.. math::
\exp \{ - l(D) / \sum_i \sum_j count_{i,j} \}
Parameters
----------
test_data : SArray of type dict or SFrame with a single column of type dict
Documents in bag-of-words format.
predictions : SArray
An SArray of vector type, where each vector contains estimates of the
probability that this document belongs to each of the topics.
This must have the same size as test_data; otherwise an exception
occurs. This can be the output of
:py:func:`~turicreate.topic_model.TopicModel.predict`, for example.
topics : SFrame
An SFrame containing two columns: 'vocabulary' and 'topic_probabilities'.
The value returned by m['topics'] is a valid input for this argument,
where m is a trained :py:class:`~turicreate.topic_model.TopicModel`.
vocabulary : SArray
An SArray of words to use. All words in test_data that are not in this
vocabulary will be ignored.
Notes
-----
For more details, see equations 13-16 of [PattersonTeh2013].
References
----------
.. [PERP] `Wikipedia - perplexity <http://en.wikipedia.org/wiki/Perplexity>`_
.. [PattersonTeh2013] Patterson, Teh. `"Stochastic Gradient Riemannian
Langevin Dynamics on the Probability Simplex"
<http://www.stats.ox.ac.uk/~teh/research/compstats/PatTeh2013a.pdf>`_
NIPS, 2013.
Examples
--------
>>> from turicreate import topic_model
>>> train_data, test_data = turicreate.text_analytics.random_split(docs)
>>> m = topic_model.create(train_data)
>>> pred = m.predict(train_data)
>>> topics = m['topics']
>>> p = topic_model.perplexity(test_data, pred,
topics['topic_probabilities'],
topics['vocabulary'])
>>> p
1720.7 # lower values are better | [
"Compute",
"the",
"perplexity",
"of",
"a",
"set",
"of",
"test",
"documents",
"given",
"a",
"set",
"of",
"predicted",
"topics",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L740-L826 |
def get_topics(self, topic_ids=None, num_words=5, cdf_cutoff=1.0,
               output_type='topic_probabilities'):
    """
    Get the words associated with a given topic. The score column is the
    probability of choosing that word given that you have chosen a
    particular topic.

    Parameters
    ----------
    topic_ids : list of int, optional
        The topics to retrieve words for. Topic ids are zero-based.
        Defaults to all topics. Raises an error if an id is negative,
        greater than or equal to m['num_topics'], or not an integer.

    num_words : int, optional
        The number of words to show per topic.

    cdf_cutoff : float, optional
        Only show the most probable words whose cumulative probability
        is below this cutoff. For example if there exist three words where

        .. math::
           p(word_1 | topic_k) = .1

           p(word_2 | topic_k) = .2

           p(word_3 | topic_k) = .05

        then setting :math:`cdf_{cutoff}=.3` would return only
        :math:`word_1` and :math:`word_2` since
        :math:`p(word_1 | topic_k) + p(word_2 | topic_k) <= cdf_{cutoff}`

    output_type : {'topic_probabilities' | 'topic_words'}, optional
        Determine the type of desired output. See below.

    Returns
    -------
    out : SFrame
        If output_type is 'topic_probabilities', then the returned value is
        an SFrame with a column of words ranked by a column of scores for
        each topic. Otherwise, the returned value is a SArray where
        each element is a list of the most probable words for each topic.

    Examples
    --------
    >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
    >>> m = turicreate.topic_model.create(docs, num_iterations=50)
    >>> m.get_topics()                           # ranked words for every topic
    >>> m.get_topics([0, 1], num_words=15)       # top 15 words for topics 0, 1
    >>> m.get_topics(output_type='topic_words')  # word lists only, one per topic
    """
    _check_categorical_option_type('output_type', output_type,
                                   ['topic_probabilities', 'topic_words'])

    if topic_ids is None:
        topic_ids = list(range(self._get('num_topics')))

    assert isinstance(topic_ids, list), \
        "The provided topic_ids is not a list."

    # Reject any id that is not an integer. The previous check only
    # filtered out strings, so floats (or None) slipped through to the
    # range check below and produced a confusing TypeError instead of
    # this explicit message.
    if not all(isinstance(x, int) for x in topic_ids):
        raise ValueError("Only integer topic_ids can be used at this point in time.")
    if not all(0 <= x < self.num_topics for x in topic_ids):
        raise ValueError("Topic id values must be non-negative and less than the " + \
                         "number of topics used to fit the model.")

    opts = {'model': self.__proxy__,
            'topic_ids': topic_ids,
            'num_words': num_words,
            'cdf_cutoff': cdf_cutoff}
    response = _turicreate.extensions._text.topicmodel_get_topic(opts)
    ret = response['top_words']

    def sort_wordlist_by_prob(z):
        # Order a {word: probability} dict by descending probability and
        # keep just the words.
        words = sorted(z.items(), key=_operator.itemgetter(1), reverse=True)
        return [word for (word, prob) in words]

    if output_type != 'topic_probabilities':
        # Collapse each topic's (word, score) rows into one ranked word
        # list per topic.
        ret = ret.groupby('topic',
                          {'word': _turicreate.aggregate.CONCAT('word', 'score')})
        words = ret.sort('topic')['word'].apply(sort_wordlist_by_prob)
        ret = _SFrame({'words': words})

    return ret
"def",
"get_topics",
"(",
"self",
",",
"topic_ids",
"=",
"None",
",",
"num_words",
"=",
"5",
",",
"cdf_cutoff",
"=",
"1.0",
",",
"output_type",
"=",
"'topic_probabilities'",
")",
":",
"_check_categorical_option_type",
"(",
"'output_type'",
",",
"output_type",
",... | Get the words associated with a given topic. The score column is the
probability of choosing that word given that you have chosen a
particular topic.
Parameters
----------
topic_ids : list of int, optional
The topics to retrieve words. Topic ids are zero-based.
Throws an error if greater than or equal to m['num_topics'], or
if the requested topic name is not present.
num_words : int, optional
The number of words to show.
cdf_cutoff : float, optional
Allows one to only show the most probable words whose cumulative
probability is below this cutoff. For example if there exist
three words where
.. math::
p(word_1 | topic_k) = .1
p(word_2 | topic_k) = .2
p(word_3 | topic_k) = .05
then setting :math:`cdf_{cutoff}=.3` would return only
:math:`word_1` and :math:`word_2` since
:math:`p(word_1 | topic_k) + p(word_2 | topic_k) <= cdf_{cutoff}`
output_type : {'topic_probabilities' | 'topic_words'}, optional
Determine the type of desired output. See below.
Returns
-------
out : SFrame
If output_type is 'topic_probabilities', then the returned value is
an SFrame with a column of words ranked by a column of scores for
each topic. Otherwise, the returned value is a SArray where
each element is a list of the most probable words for each topic.
Examples
--------
Get the highest ranked words for all topics.
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs,
num_iterations=50)
>>> m.get_topics()
+-------+----------+-----------------+
| topic | word | score |
+-------+----------+-----------------+
| 0 | cell | 0.028974400831 |
| 0 | input | 0.0259470208503 |
| 0 | image | 0.0215721599763 |
| 0 | visual | 0.0173635081992 |
| 0 | object | 0.0172447874156 |
| 1 | function | 0.0482834508265 |
| 1 | input | 0.0456270024091 |
| 1 | point | 0.0302662839454 |
| 1 | result | 0.0239474934631 |
| 1 | problem | 0.0231750116011 |
| ... | ... | ... |
+-------+----------+-----------------+
Get the highest ranked words for topics 0 and 1 and show 15 words per
topic.
>>> m.get_topics([0, 1], num_words=15)
+-------+----------+------------------+
| topic | word | score |
+-------+----------+------------------+
| 0 | cell | 0.028974400831 |
| 0 | input | 0.0259470208503 |
| 0 | image | 0.0215721599763 |
| 0 | visual | 0.0173635081992 |
| 0 | object | 0.0172447874156 |
| 0 | response | 0.0139740298286 |
| 0 | layer | 0.0122585145062 |
| 0 | features | 0.0115343177265 |
| 0 | feature | 0.0103530459301 |
| 0 | spatial | 0.00823387994361 |
| ... | ... | ... |
+-------+----------+------------------+
If one wants to instead just get the top words per topic, one may
change the format of the output as follows.
>>> topics = m.get_topics(output_type='topic_words')
dtype: list
Rows: 10
[['cell', 'image', 'input', 'object', 'visual'],
['algorithm', 'data', 'learning', 'method', 'set'],
['function', 'input', 'point', 'problem', 'result'],
['model', 'output', 'pattern', 'set', 'unit'],
['action', 'learning', 'net', 'problem', 'system'],
['error', 'function', 'network', 'parameter', 'weight'],
['information', 'level', 'neural', 'threshold', 'weight'],
['control', 'field', 'model', 'network', 'neuron'],
['hidden', 'layer', 'system', 'training', 'vector'],
['component', 'distribution', 'local', 'model', 'optimal']] | [
"Get",
"the",
"words",
"associated",
"with",
"a",
"given",
"topic",
".",
"The",
"score",
"column",
"is",
"the",
"probability",
"of",
"choosing",
"that",
"word",
"given",
"that",
"you",
"have",
"chosen",
"a",
"particular",
"topic",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L430-L568 |
def predict(self, dataset, output_type='assignment', num_burnin=None):
    """
    Use the model to predict topics for each document. The provided
    `dataset` should be an SArray object where each element is a dict
    representing a single document in bag-of-words format, where keys
    are words and values are their corresponding counts. If `dataset` is
    an SFrame, then it must contain a single column of dict type.

    The current implementation will make inferences about each document
    given its estimates of the topics learned when creating the model.
    This is done via Gibbs sampling.

    Parameters
    ----------
    dataset : SArray, SFrame of type dict
        A set of documents to use for making predictions.

    output_type : str, optional
        The type of output desired. This can either be

        - assignment: the returned values are integers in [0, num_topics)
        - probability: each returned prediction is a vector with length
          num_topics, where element k represents the probability that
          document belongs to topic k.

    num_burnin : int, optional
        The number of iterations of Gibbs sampling to perform when
        inferring the topics for documents at prediction time.
        If provided this will override the burnin value set during
        training.

    Returns
    -------
    out : SArray

    See Also
    --------
    evaluate

    Examples
    --------
    >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
    >>> m = turicreate.topic_model.create(docs)
    >>> pred = m.predict(docs)
    >>> pred = m.predict(docs, output_type='probability')

    Notes
    -----
    For each unique word w in a document d, we sample an assignment to
    topic k with probability proportional to

    .. math::
        p(z_{dw} = k) \propto (n_{d,k} + \\alpha) * \Phi_{w,k}

    where

    - :math:`W` is the size of the vocabulary,
    - :math:`n_{d,k}` is the number of other times we have assigned a word in
      document to d to topic :math:`k`,
    - :math:`\Phi_{w,k}` is the probability under the model of choosing word
      :math:`w` given the word is of topic :math:`k`. This is the matrix
      returned by calling `m['topics']`.

    This represents a collapsed Gibbs sampler for the document assignments
    while we keep the topics learned during training fixed.
    This process is done in parallel across all documents, five times per
    document.
    """
    dataset = _check_input(dataset)

    if num_burnin is None:
        num_burnin = self.num_burnin

    opts = {'model': self.__proxy__,
            'data': dataset,
            'num_burnin': num_burnin}
    response = _turicreate.extensions._text.topicmodel_predict(opts)
    preds = response['predictions']

    # Get most likely topic if probabilities are not requested.
    if output_type not in ['probability', 'probabilities', 'prob']:
        # Equivalent to numpy.argmax(x): index of the largest element,
        # breaking ties toward the first occurrence. The previous
        # max(izip(x, xrange(len(x))))[1] form broke ties toward the
        # *last* maximum, contradicting the argmax contract.
        preds = preds.apply(lambda x: max(range(len(x)), key=lambda i: x[i]))

    return preds
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"'assignment'",
",",
"num_burnin",
"=",
"None",
")",
":",
"dataset",
"=",
"_check_input",
"(",
"dataset",
")",
"if",
"num_burnin",
"is",
"None",
":",
"num_burnin",
"=",
"self",
".",
... | Use the model to predict topics for each document. The provided
`dataset` should be an SArray object where each element is a dict
representing a single document in bag-of-words format, where keys
are words and values are their corresponding counts. If `dataset` is
an SFrame, then it must contain a single column of dict type.
The current implementation will make inferences about each document
given its estimates of the topics learned when creating the model.
This is done via Gibbs sampling.
Parameters
----------
dataset : SArray, SFrame of type dict
A set of documents to use for making predictions.
output_type : str, optional
The type of output desired. This can either be
- assignment: the returned values are integers in [0, num_topics)
- probability: each returned prediction is a vector with length
num_topics, where element k represents the probability that
document belongs to topic k.
num_burnin : int, optional
The number of iterations of Gibbs sampling to perform when
inferring the topics for documents at prediction time.
If provided this will override the burnin value set during
training.
Returns
-------
out : SArray
See Also
--------
evaluate
Examples
--------
Make predictions about which topic each document belongs to.
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs)
>>> pred = m.predict(docs)
If one is interested in the probability of each topic
>>> pred = m.predict(docs, output_type='probability')
Notes
-----
For each unique word w in a document d, we sample an assignment to
topic k with probability proportional to
.. math::
p(z_{dw} = k) \propto (n_{d,k} + \\alpha) * \Phi_{w,k}
where
- :math:`W` is the size of the vocabulary,
- :math:`n_{d,k}` is the number of other times we have assigned a word in
document to d to topic :math:`k`,
- :math:`\Phi_{w,k}` is the probability under the model of choosing word
:math:`w` given the word is of topic :math:`k`. This is the matrix
returned by calling `m['topics']`.
This represents a collapsed Gibbs sampler for the document assignments
while we keep the topics learned during training fixed.
This process is done in parallel across all documents, five times per
document. | [
"Use",
"the",
"model",
"to",
"predict",
"topics",
"for",
"each",
"document",
".",
"The",
"provided",
"dataset",
"should",
"be",
"an",
"SArray",
"object",
"where",
"each",
"element",
"is",
"a",
"dict",
"representing",
"a",
"single",
"document",
"in",
"bag",
... | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L570-L660 |
def evaluate(self, train_data, test_data=None, metric='perplexity'):
    """
    Estimate how well the model predicts unseen data.

    A common evaluation scheme for topic models is to train on one half
    of a corpus and measure how well the fitted model predicts the held
    out half. This method reports perplexity, a quantity related to the
    likelihood of the test words under the model; see
    :py:func:`~turicreate.topic_model.perplexity` for details.

    `train_data` and `test_data` must contain the same number of
    documents: the model infers each document's topic mixture from
    `train_data`, then scores the unseen words in the corresponding
    `test_data` document. See
    :py:func:`~turicreate.topic_model.TopicModel.predict` for how the
    topic inferences are made, and
    :py:func:`~turicreate.text_analytics.random_split` for a helper that
    produces suitable train/test splits.

    Parameters
    ----------
    train_data : SArray or SFrame
        A set of documents to predict topics for.

    test_data : SArray or SFrame, optional
        A set of documents to evaluate performance on.
        By default this will set to be the same as train_data.

    metric : str
        The chosen metric to use for evaluating the topic model.
        Currently only 'perplexity' is supported.

    Returns
    -------
    out : dict
        The set of estimated evaluation metrics.

    See Also
    --------
    predict, turicreate.toolkits.text_analytics.random_split

    Examples
    --------
    >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
    >>> train_data, test_data = turicreate.text_analytics.random_split(docs)
    >>> m = turicreate.topic_model.create(train_data)
    >>> m.evaluate(train_data, test_data)
    {'perplexity': 2467.530370396021}
    """
    train_data = _check_input(train_data)
    test_data = train_data if test_data is None else _check_input(test_data)

    # Infer per-document topic mixtures from the training half, then
    # score the held-out words against those mixtures.
    doc_topic_probs = self.predict(train_data, output_type='probability')
    topics = self.topics

    return {'perplexity': perplexity(test_data,
                                     doc_topic_probs,
                                     topics['topic_probabilities'],
                                     topics['vocabulary'])}
"def",
"evaluate",
"(",
"self",
",",
"train_data",
",",
"test_data",
"=",
"None",
",",
"metric",
"=",
"'perplexity'",
")",
":",
"train_data",
"=",
"_check_input",
"(",
"train_data",
")",
"if",
"test_data",
"is",
"None",
":",
"test_data",
"=",
"train_data",
... | Estimate the model's ability to predict new data. Imagine you have a
corpus of books. One common approach to evaluating topic models is to
train on the first half of all of the books and see how well the model
predicts the second half of each book.
This method returns a metric called perplexity, which is related to the
likelihood of observing these words under the given model. See
:py:func:`~turicreate.topic_model.perplexity` for more details.
The provided `train_data` and `test_data` must have the same length,
i.e., both data sets must have the same number of documents; the model
will use train_data to estimate which topic the document belongs to, and
this is used to estimate the model's performance at predicting the
unseen words in the test data.
See :py:func:`~turicreate.topic_model.TopicModel.predict` for details
on how these predictions are made, and see
:py:func:`~turicreate.text_analytics.random_split` for a helper function
that can be used for making train/test splits.
Parameters
----------
train_data : SArray or SFrame
A set of documents to predict topics for.
test_data : SArray or SFrame, optional
A set of documents to evaluate performance on.
By default this will set to be the same as train_data.
metric : str
The chosen metric to use for evaluating the topic model.
Currently only 'perplexity' is supported.
Returns
-------
out : dict
The set of estimated evaluation metrics.
See Also
--------
predict, turicreate.toolkits.text_analytics.random_split
Examples
--------
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> train_data, test_data = turicreate.text_analytics.random_split(docs)
>>> m = turicreate.topic_model.create(train_data)
>>> m.evaluate(train_data, test_data)
{'perplexity': 2467.530370396021} | [
"Estimate",
"the",
"model",
"s",
"ability",
"to",
"predict",
"new",
"data",
".",
"Imagine",
"you",
"have",
"a",
"corpus",
"of",
"books",
".",
"One",
"common",
"approach",
"to",
"evaluating",
"topic",
"models",
"is",
"to",
"train",
"on",
"the",
"first",
"... | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L663-L731 |
def _raise_error_if_not_drawing_classifier_input_sframe(
        dataset, feature, target):
    """
    Sanity-check the SFrame passed to `turicreate.drawing_classifier.create`,
    raising a ToolkitError when a required column is missing or has an
    unexpected type, or when the dataset is empty.
    """
    from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
    _raise_error_if_not_sframe(dataset)

    columns = dataset.column_names()
    if feature not in columns:
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if target not in columns:
        raise _ToolkitError("Target column '%s' does not exist" % target)

    # Drawings arrive either as raster images or as stroke lists.
    if dataset[feature].dtype not in (_tc.Image, list):
        raise _ToolkitError(
            "Feature column must contain images"
            " or stroke-based drawings encoded as lists of strokes"
            " where each stroke is a list of points and"
            " each point is stored as a dictionary")

    target_dtype = dataset[target].dtype
    if target_dtype not in (int, str):
        raise _ToolkitError(
            "Target column contains " + str(target_dtype)
            + " but it must contain strings or integers to represent"
              " labels for drawings.")

    if len(dataset) == 0:
        raise _ToolkitError("Input Dataset is empty!")
"def",
"_raise_error_if_not_drawing_classifier_input_sframe",
"(",
"dataset",
",",
"feature",
",",
"target",
")",
":",
"from",
"turicreate",
".",
"toolkits",
".",
"_internal_utils",
"import",
"_raise_error_if_not_sframe",
"_raise_error_if_not_sframe",
"(",
"dataset",
")",
... | Performs some sanity checks on the SFrame provided as input to
`turicreate.drawing_classifier.create` and raises a ToolkitError
if something in the dataset is missing or wrong. | [
"Performs",
"some",
"sanity",
"checks",
"on",
"the",
"SFrame",
"provided",
"as",
"input",
"to",
"turicreate",
".",
"drawing_classifier",
".",
"create",
"and",
"raises",
"a",
"ToolkitError",
"if",
"something",
"in",
"the",
"dataset",
"is",
"missing",
"or",
"wro... | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L22-L45 |
29,620 | apple/turicreate | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | DrawingClassifier._predict_with_probabilities | def _predict_with_probabilities(self, input_dataset, batch_size=None,
verbose=True):
"""
Predict with probabilities. The core prediction part that both
`evaluate` and `predict` share.
Returns an SFrame with two columns, self.target, and "probabilities".
The column with column name, self.target, contains the predictions made
by the model for the provided dataset.
The "probabilities" column contains the probabilities for each class
that the model predicted for the data provided to the function.
"""
from .._mxnet import _mxnet_utils
import mxnet as _mx
from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
dataset = _extensions._drawing_classifier_prepare_data(
input_dataset, self.feature) if is_stroke_input else input_dataset
batch_size = self.batch_size if batch_size is None else batch_size
loader = _SFrameClassifierIter(dataset, batch_size,
class_to_index=self._class_to_index,
feature_column=self.feature,
target_column=self.target,
load_labels=False,
shuffle=False,
iterations=1)
dataset_size = len(dataset)
ctx = _mxnet_utils.get_mxnet_context()
index = 0
last_time = 0
done = False
from turicreate import SArrayBuilder
from array import array
classes = self.classes
all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
all_probabilities_builder = SArrayBuilder(dtype=array)
for batch in loader:
if batch.pad is not None:
size = batch_size - batch.pad
batch_data = _mx.nd.slice_axis(batch.data[0],
axis=0, begin=0, end=size)
else:
batch_data = batch.data[0]
size = batch_size
num_devices = min(batch_data.shape[0], len(ctx))
split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)
for data in split_data:
z = self._model(data).asnumpy()
predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
split_length = z.shape[0]
all_predicted_builder.append_multiple(predicted)
all_probabilities_builder.append_multiple(z.tolist())
index += split_length
if index == dataset_size - 1:
done = True
cur_time = _time.time()
# Do not print progress if only a few samples are predicted
if verbose and (dataset_size >= 5
and cur_time > last_time + 10 or done):
print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
cur_n = index + 1,
max_n = dataset_size,
width = len(str(dataset_size))))
last_time = cur_time
return (_tc.SFrame({self.target: all_predicted_builder.close(),
'probability': all_probabilities_builder.close()})) | python | def _predict_with_probabilities(self, input_dataset, batch_size=None,
verbose=True):
"""
Predict with probabilities. The core prediction part that both
`evaluate` and `predict` share.
Returns an SFrame with two columns, self.target, and "probabilities".
The column with column name, self.target, contains the predictions made
by the model for the provided dataset.
The "probabilities" column contains the probabilities for each class
that the model predicted for the data provided to the function.
"""
from .._mxnet import _mxnet_utils
import mxnet as _mx
from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
dataset = _extensions._drawing_classifier_prepare_data(
input_dataset, self.feature) if is_stroke_input else input_dataset
batch_size = self.batch_size if batch_size is None else batch_size
loader = _SFrameClassifierIter(dataset, batch_size,
class_to_index=self._class_to_index,
feature_column=self.feature,
target_column=self.target,
load_labels=False,
shuffle=False,
iterations=1)
dataset_size = len(dataset)
ctx = _mxnet_utils.get_mxnet_context()
index = 0
last_time = 0
done = False
from turicreate import SArrayBuilder
from array import array
classes = self.classes
all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
all_probabilities_builder = SArrayBuilder(dtype=array)
for batch in loader:
if batch.pad is not None:
size = batch_size - batch.pad
batch_data = _mx.nd.slice_axis(batch.data[0],
axis=0, begin=0, end=size)
else:
batch_data = batch.data[0]
size = batch_size
num_devices = min(batch_data.shape[0], len(ctx))
split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)
for data in split_data:
z = self._model(data).asnumpy()
predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
split_length = z.shape[0]
all_predicted_builder.append_multiple(predicted)
all_probabilities_builder.append_multiple(z.tolist())
index += split_length
if index == dataset_size - 1:
done = True
cur_time = _time.time()
# Do not print progress if only a few samples are predicted
if verbose and (dataset_size >= 5
and cur_time > last_time + 10 or done):
print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
cur_n = index + 1,
max_n = dataset_size,
width = len(str(dataset_size))))
last_time = cur_time
return (_tc.SFrame({self.target: all_predicted_builder.close(),
'probability': all_probabilities_builder.close()})) | [
"def",
"_predict_with_probabilities",
"(",
"self",
",",
"input_dataset",
",",
"batch_size",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"from",
".",
".",
"_mxnet",
"import",
"_mxnet_utils",
"import",
"mxnet",
"as",
"_mx",
"from",
".",
"_sframe_loader",... | Predict with probabilities. The core prediction part that both
`evaluate` and `predict` share.
Returns an SFrame with two columns, self.target, and "probabilities".
The column with column name, self.target, contains the predictions made
by the model for the provided dataset.
The "probabilities" column contains the probabilities for each class
that the model predicted for the data provided to the function. | [
"Predict",
"with",
"probabilities",
".",
"The",
"core",
"prediction",
"part",
"that",
"both",
"evaluate",
"and",
"predict",
"share",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L522-L601 |
29,621 | apple/turicreate | src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py | DrawingClassifier.predict | def predict(self, data, output_type='class', batch_size=None, verbose=True):
"""
Predict on an SFrame or SArray of drawings, or on a single drawing.
Parameters
----------
data : SFrame | SArray | tc.Image | list
The drawing(s) on which to perform drawing classification.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
If the data is a single drawing, it can be either of type tc.Image,
in which case it is a bitmap-based drawing input,
or of type list, in which case it is a stroke-based drawing input.
output_type : {'probability', 'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. Label ordering is dictated by the ``classes``
member variable.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
verbose : bool, optional
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
a drawing and contains a single value corresponding to the
predicted label. Each prediction will have type integer or string
depending on the type of the classes the model was trained on.
If `data` is a single drawing, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Print predictions, for a better overview
>>> print(pred)
dtype: int
Rows: 10
[3, 4, 3, 3, 4, 5, 8, 8, 8, 4]
"""
_tkutl._check_categorical_option_type("output_type", output_type,
["probability", "class", "probability_vector"])
if isinstance(data, _tc.SArray):
predicted = self._predict_with_probabilities(
_tc.SFrame({
self.feature: data
}),
batch_size,
verbose
)
elif isinstance(data, _tc.SFrame):
predicted = self._predict_with_probabilities(data, batch_size, verbose)
else:
# single input
predicted = self._predict_with_probabilities(
_tc.SFrame({
self.feature: [data]
}),
batch_size,
verbose
)
if output_type == "class":
return predicted[self.target]
elif output_type == "probability":
_class_to_index = self._class_to_index
target = self.target
return predicted.apply(
lambda row: row["probability"][_class_to_index[row[target]]])
else:
assert (output_type == "probability_vector")
return predicted["probability"] | python | def predict(self, data, output_type='class', batch_size=None, verbose=True):
"""
Predict on an SFrame or SArray of drawings, or on a single drawing.
Parameters
----------
data : SFrame | SArray | tc.Image | list
The drawing(s) on which to perform drawing classification.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
If the data is a single drawing, it can be either of type tc.Image,
in which case it is a bitmap-based drawing input,
or of type list, in which case it is a stroke-based drawing input.
output_type : {'probability', 'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. Label ordering is dictated by the ``classes``
member variable.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
verbose : bool, optional
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
a drawing and contains a single value corresponding to the
predicted label. Each prediction will have type integer or string
depending on the type of the classes the model was trained on.
If `data` is a single drawing, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Print predictions, for a better overview
>>> print(pred)
dtype: int
Rows: 10
[3, 4, 3, 3, 4, 5, 8, 8, 8, 4]
"""
_tkutl._check_categorical_option_type("output_type", output_type,
["probability", "class", "probability_vector"])
if isinstance(data, _tc.SArray):
predicted = self._predict_with_probabilities(
_tc.SFrame({
self.feature: data
}),
batch_size,
verbose
)
elif isinstance(data, _tc.SFrame):
predicted = self._predict_with_probabilities(data, batch_size, verbose)
else:
# single input
predicted = self._predict_with_probabilities(
_tc.SFrame({
self.feature: [data]
}),
batch_size,
verbose
)
if output_type == "class":
return predicted[self.target]
elif output_type == "probability":
_class_to_index = self._class_to_index
target = self.target
return predicted.apply(
lambda row: row["probability"][_class_to_index[row[target]]])
else:
assert (output_type == "probability_vector")
return predicted["probability"] | [
"def",
"predict",
"(",
"self",
",",
"data",
",",
"output_type",
"=",
"'class'",
",",
"batch_size",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"_tkutl",
".",
"_check_categorical_option_type",
"(",
"\"output_type\"",
",",
"output_type",
",",
"[",
"\"p... | Predict on an SFrame or SArray of drawings, or on a single drawing.
Parameters
----------
data : SFrame | SArray | tc.Image | list
The drawing(s) on which to perform drawing classification.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
If the data is a single drawing, it can be either of type tc.Image,
in which case it is a bitmap-based drawing input,
or of type list, in which case it is a stroke-based drawing input.
output_type : {'probability', 'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. Label ordering is dictated by the ``classes``
member variable.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
verbose : bool, optional
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
a drawing and contains a single value corresponding to the
predicted label. Each prediction will have type integer or string
depending on the type of the classes the model was trained on.
If `data` is a single drawing, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Print predictions, for a better overview
>>> print(pred)
dtype: int
Rows: 10
[3, 4, 3, 3, 4, 5, 8, 8, 8, 4] | [
"Predict",
"on",
"an",
"SFrame",
"or",
"SArray",
"of",
"drawings",
"or",
"on",
"a",
"single",
"drawing",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py#L788-L879 |
29,622 | apple/turicreate | src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py | _BOW_FEATURE_EXTRACTOR | def _BOW_FEATURE_EXTRACTOR(sf, target=None):
"""
Return an SFrame containing a bag of words representation of each column.
"""
if isinstance(sf, dict):
out = _tc.SArray([sf]).unpack('')
elif isinstance(sf, _tc.SFrame):
out = sf.__copy__()
else:
raise ValueError("Unrecognized input to feature extractor.")
for f in _get_str_columns(out):
if target != f:
out[f] = _tc.text_analytics.count_words(out[f])
return out | python | def _BOW_FEATURE_EXTRACTOR(sf, target=None):
"""
Return an SFrame containing a bag of words representation of each column.
"""
if isinstance(sf, dict):
out = _tc.SArray([sf]).unpack('')
elif isinstance(sf, _tc.SFrame):
out = sf.__copy__()
else:
raise ValueError("Unrecognized input to feature extractor.")
for f in _get_str_columns(out):
if target != f:
out[f] = _tc.text_analytics.count_words(out[f])
return out | [
"def",
"_BOW_FEATURE_EXTRACTOR",
"(",
"sf",
",",
"target",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"sf",
",",
"dict",
")",
":",
"out",
"=",
"_tc",
".",
"SArray",
"(",
"[",
"sf",
"]",
")",
".",
"unpack",
"(",
"''",
")",
"elif",
"isinstance",... | Return an SFrame containing a bag of words representation of each column. | [
"Return",
"an",
"SFrame",
"containing",
"a",
"bag",
"of",
"words",
"representation",
"of",
"each",
"column",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L18-L31 |
29,623 | apple/turicreate | src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py | _get_str_columns | def _get_str_columns(sf):
"""
Returns a list of names of columns that are string type.
"""
return [name for name in sf.column_names() if sf[name].dtype == str] | python | def _get_str_columns(sf):
"""
Returns a list of names of columns that are string type.
"""
return [name for name in sf.column_names() if sf[name].dtype == str] | [
"def",
"_get_str_columns",
"(",
"sf",
")",
":",
"return",
"[",
"name",
"for",
"name",
"in",
"sf",
".",
"column_names",
"(",
")",
"if",
"sf",
"[",
"name",
"]",
".",
"dtype",
"==",
"str",
"]"
] | Returns a list of names of columns that are string type. | [
"Returns",
"a",
"list",
"of",
"names",
"of",
"columns",
"that",
"are",
"string",
"type",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L372-L376 |
29,624 | apple/turicreate | src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py | TextClassifier.predict | def predict(self, dataset, output_type='class'):
"""
Return predictions for ``dataset``, using the trained model.
Parameters
----------
dataset : SFrame
dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
Returns
-------
out : SArray
An SArray with model predictions.
See Also
----------
create, evaluate, classify
Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> m.predict(dataset)
"""
m = self.__proxy__['classifier']
target = self.__proxy__['target']
f = _BOW_FEATURE_EXTRACTOR
return m.predict(f(dataset, target), output_type=output_type) | python | def predict(self, dataset, output_type='class'):
"""
Return predictions for ``dataset``, using the trained model.
Parameters
----------
dataset : SFrame
dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
Returns
-------
out : SArray
An SArray with model predictions.
See Also
----------
create, evaluate, classify
Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> m.predict(dataset)
"""
m = self.__proxy__['classifier']
target = self.__proxy__['target']
f = _BOW_FEATURE_EXTRACTOR
return m.predict(f(dataset, target), output_type=output_type) | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"'class'",
")",
":",
"m",
"=",
"self",
".",
"__proxy__",
"[",
"'classifier'",
"]",
"target",
"=",
"self",
".",
"__proxy__",
"[",
"'target'",
"]",
"f",
"=",
"_BOW_FEATURE_EXTRACTOR",
... | Return predictions for ``dataset``, using the trained model.
Parameters
----------
dataset : SFrame
dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
Returns
-------
out : SArray
An SArray with model predictions.
See Also
----------
create, evaluate, classify
Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> m.predict(dataset) | [
"Return",
"predictions",
"for",
"dataset",
"using",
"the",
"trained",
"model",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L182-L224 |
29,625 | apple/turicreate | src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py | TextClassifier.classify | def classify(self, dataset):
"""
Return a classification, for each example in the ``dataset``, using the
trained model. The output SFrame contains predictions as both class
labels as well as probabilities that the predicted value is the
associated label.
Parameters
----------
dataset : SFrame
dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> output = m.classify(dataset)
"""
m = self.__proxy__['classifier']
target = self.__proxy__['target']
f = _BOW_FEATURE_EXTRACTOR
return m.classify(f(dataset, target)) | python | def classify(self, dataset):
"""
Return a classification, for each example in the ``dataset``, using the
trained model. The output SFrame contains predictions as both class
labels as well as probabilities that the predicted value is the
associated label.
Parameters
----------
dataset : SFrame
dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> output = m.classify(dataset)
"""
m = self.__proxy__['classifier']
target = self.__proxy__['target']
f = _BOW_FEATURE_EXTRACTOR
return m.classify(f(dataset, target)) | [
"def",
"classify",
"(",
"self",
",",
"dataset",
")",
":",
"m",
"=",
"self",
".",
"__proxy__",
"[",
"'classifier'",
"]",
"target",
"=",
"self",
".",
"__proxy__",
"[",
"'target'",
"]",
"f",
"=",
"_BOW_FEATURE_EXTRACTOR",
"return",
"m",
".",
"classify",
"("... | Return a classification, for each example in the ``dataset``, using the
trained model. The output SFrame contains predictions as both class
labels as well as probabilities that the predicted value is the
associated label.
Parameters
----------
dataset : SFrame
dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> output = m.classify(dataset) | [
"Return",
"a",
"classification",
"for",
"each",
"example",
"in",
"the",
"dataset",
"using",
"the",
"trained",
"model",
".",
"The",
"output",
"SFrame",
"contains",
"predictions",
"as",
"both",
"class",
"labels",
"as",
"well",
"as",
"probabilities",
"that",
"the... | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/text_classifier/_text_classifier.py#L226-L260 |
29,626 | apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_SVR.py | _generate_base_svm_regression_spec | def _generate_base_svm_regression_spec(model):
"""
Takes an SVM regression model produces a starting spec using the parts.
that are shared between all SVMs.
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
svm = spec.supportVectorRegressor
_set_kernel(model, svm)
svm.rho = -model.intercept_[0]
for i in range(len(model._dual_coef_)):
for cur_alpha in model._dual_coef_[i]:
svm.coefficients.alpha.append(cur_alpha)
for cur_src_vector in model.support_vectors_:
cur_dest_vector = svm.denseSupportVectors.vectors.add()
for i in cur_src_vector:
cur_dest_vector.values.append(i)
return spec | python | def _generate_base_svm_regression_spec(model):
"""
Takes an SVM regression model produces a starting spec using the parts.
that are shared between all SVMs.
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
svm = spec.supportVectorRegressor
_set_kernel(model, svm)
svm.rho = -model.intercept_[0]
for i in range(len(model._dual_coef_)):
for cur_alpha in model._dual_coef_[i]:
svm.coefficients.alpha.append(cur_alpha)
for cur_src_vector in model.support_vectors_:
cur_dest_vector = svm.denseSupportVectors.vectors.add()
for i in cur_src_vector:
cur_dest_vector.values.append(i)
return spec | [
"def",
"_generate_base_svm_regression_spec",
"(",
"model",
")",
":",
"if",
"not",
"(",
"_HAS_SKLEARN",
")",
":",
"raise",
"RuntimeError",
"(",
"'scikit-learn not found. scikit-learn conversion API is disabled.'",
")",
"spec",
"=",
"_Model_pb2",
".",
"Model",
"(",
")",
... | Takes an SVM regression model produces a starting spec using the parts.
that are shared between all SVMs. | [
"Takes",
"an",
"SVM",
"regression",
"model",
"produces",
"a",
"starting",
"spec",
"using",
"the",
"parts",
".",
"that",
"are",
"shared",
"between",
"all",
"SVMs",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_SVR.py#L23-L46 |
29,627 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _VerifyExtensionHandle | def _VerifyExtensionHandle(message, extension_handle):
"""Verify that the given extension handle is valid."""
if not isinstance(extension_handle, _FieldDescriptor):
raise KeyError('HasExtension() expects an extension handle, got: %s' %
extension_handle)
if not extension_handle.is_extension:
raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
if not extension_handle.containing_type:
raise KeyError('"%s" is missing a containing_type.'
% extension_handle.full_name)
if extension_handle.containing_type is not message.DESCRIPTOR:
raise KeyError('Extension "%s" extends message type "%s", but this '
'message is of type "%s".' %
(extension_handle.full_name,
extension_handle.containing_type.full_name,
message.DESCRIPTOR.full_name)) | python | def _VerifyExtensionHandle(message, extension_handle):
"""Verify that the given extension handle is valid."""
if not isinstance(extension_handle, _FieldDescriptor):
raise KeyError('HasExtension() expects an extension handle, got: %s' %
extension_handle)
if not extension_handle.is_extension:
raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
if not extension_handle.containing_type:
raise KeyError('"%s" is missing a containing_type.'
% extension_handle.full_name)
if extension_handle.containing_type is not message.DESCRIPTOR:
raise KeyError('Extension "%s" extends message type "%s", but this '
'message is of type "%s".' %
(extension_handle.full_name,
extension_handle.containing_type.full_name,
message.DESCRIPTOR.full_name)) | [
"def",
"_VerifyExtensionHandle",
"(",
"message",
",",
"extension_handle",
")",
":",
"if",
"not",
"isinstance",
"(",
"extension_handle",
",",
"_FieldDescriptor",
")",
":",
"raise",
"KeyError",
"(",
"'HasExtension() expects an extension handle, got: %s'",
"%",
"extension_ha... | Verify that the given extension handle is valid. | [
"Verify",
"that",
"the",
"given",
"extension",
"handle",
"is",
"valid",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L213-L232 |
29,628 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _AddEnumValues | def _AddEnumValues(descriptor, cls):
"""Sets class-level attributes for all enum fields defined in this message.
Also exporting a class-level object that can name enum values.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
"""
for enum_type in descriptor.enum_types:
setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number) | python | def _AddEnumValues(descriptor, cls):
"""Sets class-level attributes for all enum fields defined in this message.
Also exporting a class-level object that can name enum values.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
"""
for enum_type in descriptor.enum_types:
setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number) | [
"def",
"_AddEnumValues",
"(",
"descriptor",
",",
"cls",
")",
":",
"for",
"enum_type",
"in",
"descriptor",
".",
"enum_types",
":",
"setattr",
"(",
"cls",
",",
"enum_type",
".",
"name",
",",
"enum_type_wrapper",
".",
"EnumTypeWrapper",
"(",
"enum_type",
")",
"... | Sets class-level attributes for all enum fields defined in this message.
Also exporting a class-level object that can name enum values.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type. | [
"Sets",
"class",
"-",
"level",
"attributes",
"for",
"all",
"enum",
"fields",
"defined",
"in",
"this",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L347-L359 |
29,629 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _DefaultValueConstructorForField | def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
"""
if _IsMapField(field):
return _GetInitializeDefaultForMap(field)
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.has_default_value and field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# We can't look at _concrete_class yet since it might not have
# been set. (Depends on order in which we initialize the classes).
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# _concrete_class may not yet be initialized.
message_type = field.message_type
def MakeSubMessageDefault(message):
result = message_type._concrete_class()
result._SetListener(
_OneofListener(message, field)
if field.containing_oneof is not None
else message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return field.default_value
return MakeScalarDefault | python | def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
"""
if _IsMapField(field):
return _GetInitializeDefaultForMap(field)
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.has_default_value and field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# We can't look at _concrete_class yet since it might not have
# been set. (Depends on order in which we initialize the classes).
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# _concrete_class may not yet be initialized.
message_type = field.message_type
def MakeSubMessageDefault(message):
result = message_type._concrete_class()
result._SetListener(
_OneofListener(message, field)
if field.containing_oneof is not None
else message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return field.default_value
return MakeScalarDefault | [
"def",
"_DefaultValueConstructorForField",
"(",
"field",
")",
":",
"if",
"_IsMapField",
"(",
"field",
")",
":",
"return",
"_GetInitializeDefaultForMap",
"(",
"field",
")",
"if",
"field",
".",
"label",
"==",
"_FieldDescriptor",
".",
"LABEL_REPEATED",
":",
"if",
"... | Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference. | [
"Returns",
"a",
"function",
"which",
"returns",
"a",
"default",
"value",
"for",
"a",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L384-L436 |
29,630 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _ReraiseTypeErrorWithFieldName | def _ReraiseTypeErrorWithFieldName(message_name, field_name):
"""Re-raise the currently-handled TypeError with the field name added."""
exc = sys.exc_info()[1]
if len(exc.args) == 1 and type(exc) is TypeError:
# simple TypeError; add field name to exception message
exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))
# re-raise possibly-amended exception with original traceback:
six.reraise(type(exc), exc, sys.exc_info()[2]) | python | def _ReraiseTypeErrorWithFieldName(message_name, field_name):
"""Re-raise the currently-handled TypeError with the field name added."""
exc = sys.exc_info()[1]
if len(exc.args) == 1 and type(exc) is TypeError:
# simple TypeError; add field name to exception message
exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))
# re-raise possibly-amended exception with original traceback:
six.reraise(type(exc), exc, sys.exc_info()[2]) | [
"def",
"_ReraiseTypeErrorWithFieldName",
"(",
"message_name",
",",
"field_name",
")",
":",
"exc",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"if",
"len",
"(",
"exc",
".",
"args",
")",
"==",
"1",
"and",
"type",
"(",
"exc",
")",
"is",
"TypeE... | Re-raise the currently-handled TypeError with the field name added. | [
"Re",
"-",
"raise",
"the",
"currently",
"-",
"handled",
"TypeError",
"with",
"the",
"field",
"name",
"added",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L439-L447 |
29,631 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _GetFieldByName | def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message %s has no "%s" field.' %
(message_descriptor.name, field_name)) | python | def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message %s has no "%s" field.' %
(message_descriptor.name, field_name)) | [
"def",
"_GetFieldByName",
"(",
"message_descriptor",
",",
"field_name",
")",
":",
"try",
":",
"return",
"message_descriptor",
".",
"fields_by_name",
"[",
"field_name",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'Protocol message %s has no \"%s\" field.... | Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name. | [
"Returns",
"a",
"field",
"descriptor",
"by",
"field",
"name",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L534-L547 |
29,632 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _AddPropertiesForNonRepeatedScalarField | def _AddPropertiesForNonRepeatedScalarField(field, cls):
"""Adds a public property for a nonrepeated, scalar protocol message field.
Clients can use this property to get and directly set the value of the field.
Note that when the client sets the value of a field by using this property,
all necessary "has" bits are set as a side-effect, and we also perform
type-checking.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
type_checker = type_checkers.GetTypeChecker(field)
default_value = field.default_value
valid_values = set()
is_proto3 = field.containing_type.syntax == "proto3"
def getter(self):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return self._fields.get(field, default_value)
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
clear_when_set_to_default = is_proto3 and not field.containing_oneof
def field_setter(self, new_value):
# pylint: disable=protected-access
# Testing the value for truthiness captures all of the proto3 defaults
# (0, 0.0, enum 0, and False).
new_value = type_checker.CheckValue(new_value)
if clear_when_set_to_default and not new_value:
self._fields.pop(field, None)
else:
self._fields[field] = new_value
# Check _cached_byte_size_dirty inline to improve performance, since scalar
# setters are called frequently.
if not self._cached_byte_size_dirty:
self._Modified()
if field.containing_oneof:
def setter(self, new_value):
field_setter(self, new_value)
self._UpdateOneofState(field)
else:
setter = field_setter
setter.__module__ = None
setter.__doc__ = 'Setter for %s.' % proto_field_name
# Add a property to encapsulate the getter/setter.
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc)) | python | def _AddPropertiesForNonRepeatedScalarField(field, cls):
"""Adds a public property for a nonrepeated, scalar protocol message field.
Clients can use this property to get and directly set the value of the field.
Note that when the client sets the value of a field by using this property,
all necessary "has" bits are set as a side-effect, and we also perform
type-checking.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
type_checker = type_checkers.GetTypeChecker(field)
default_value = field.default_value
valid_values = set()
is_proto3 = field.containing_type.syntax == "proto3"
def getter(self):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return self._fields.get(field, default_value)
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
clear_when_set_to_default = is_proto3 and not field.containing_oneof
def field_setter(self, new_value):
# pylint: disable=protected-access
# Testing the value for truthiness captures all of the proto3 defaults
# (0, 0.0, enum 0, and False).
new_value = type_checker.CheckValue(new_value)
if clear_when_set_to_default and not new_value:
self._fields.pop(field, None)
else:
self._fields[field] = new_value
# Check _cached_byte_size_dirty inline to improve performance, since scalar
# setters are called frequently.
if not self._cached_byte_size_dirty:
self._Modified()
if field.containing_oneof:
def setter(self, new_value):
field_setter(self, new_value)
self._UpdateOneofState(field)
else:
setter = field_setter
setter.__module__ = None
setter.__doc__ = 'Setter for %s.' % proto_field_name
# Add a property to encapsulate the getter/setter.
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc)) | [
"def",
"_AddPropertiesForNonRepeatedScalarField",
"(",
"field",
",",
"cls",
")",
":",
"proto_field_name",
"=",
"field",
".",
"name",
"property_name",
"=",
"_PropertyName",
"(",
"proto_field_name",
")",
"type_checker",
"=",
"type_checkers",
".",
"GetTypeChecker",
"(",
... | Adds a public property for a nonrepeated, scalar protocol message field.
Clients can use this property to get and directly set the value of the field.
Note that when the client sets the value of a field by using this property,
all necessary "has" bits are set as a side-effect, and we also perform
type-checking.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing. | [
"Adds",
"a",
"public",
"property",
"for",
"a",
"nonrepeated",
"scalar",
"protocol",
"message",
"field",
".",
"Clients",
"can",
"use",
"this",
"property",
"to",
"get",
"and",
"directly",
"set",
"the",
"value",
"of",
"the",
"field",
".",
"Note",
"that",
"whe... | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L630-L683 |
29,633 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _InternalUnpackAny | def _InternalUnpackAny(msg):
"""Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as argument. _InternalUnpackAny method does not have
target message type and need to find the message type in descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
"""
# TODO(amauryfa): Don't use the factory of generated messages.
# To make Any work with custom factories, use the message factory of the
# parent message.
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
factory = symbol_database.Default()
type_url = msg.type_url
if not type_url:
return None
# TODO(haberman): For now we just strip the hostname. Better logic will be
# required.
type_name = type_url.split('/')[-1]
descriptor = factory.pool.FindMessageTypeByName(type_name)
if descriptor is None:
return None
message_class = factory.GetPrototype(descriptor)
message = message_class()
message.ParseFromString(msg.value)
return message | python | def _InternalUnpackAny(msg):
"""Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as argument. _InternalUnpackAny method does not have
target message type and need to find the message type in descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
"""
# TODO(amauryfa): Don't use the factory of generated messages.
# To make Any work with custom factories, use the message factory of the
# parent message.
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
factory = symbol_database.Default()
type_url = msg.type_url
if not type_url:
return None
# TODO(haberman): For now we just strip the hostname. Better logic will be
# required.
type_name = type_url.split('/')[-1]
descriptor = factory.pool.FindMessageTypeByName(type_name)
if descriptor is None:
return None
message_class = factory.GetPrototype(descriptor)
message = message_class()
message.ParseFromString(msg.value)
return message | [
"def",
"_InternalUnpackAny",
"(",
"msg",
")",
":",
"# TODO(amauryfa): Don't use the factory of generated messages.",
"# To make Any work with custom factories, use the message factory of the",
"# parent message.",
"# pylint: disable=g-import-not-at-top",
"from",
"google",
".",
"protobuf",
... | Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as argument. _InternalUnpackAny method does not have
target message type and need to find the message type in descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message. | [
"Unpacks",
"Any",
"message",
"and",
"returns",
"the",
"unpacked",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L892-L929 |
29,634 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _BytesForNonRepeatedElement | def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) | python | def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) | [
"def",
"_BytesForNonRepeatedElement",
"(",
"value",
",",
"field_number",
",",
"field_type",
")",
":",
"try",
":",
"fn",
"=",
"type_checkers",
".",
"TYPE_TO_BYTE_SIZE_FN",
"[",
"field_type",
"]",
"return",
"fn",
"(",
"field_number",
",",
"value",
")",
"except",
... | Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor. | [
"Returns",
"the",
"number",
"of",
"bytes",
"needed",
"to",
"serialize",
"a",
"non",
"-",
"repeated",
"element",
".",
"The",
"returned",
"byte",
"count",
"includes",
"space",
"for",
"tag",
"information",
"and",
"any",
"other",
"additional",
"space",
"associated... | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L984-L1001 |
29,635 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _AddIsInitializedMethod | def _AddIsInitializedMethod(message_descriptor, cls):
"""Adds the IsInitialized and FindInitializationError methods to the
protocol message class."""
required_fields = [field for field in message_descriptor.fields
if field.label == _FieldDescriptor.LABEL_REQUIRED]
def IsInitialized(self, errors=None):
"""Checks if all required fields of a message are set.
Args:
errors: A list which, if provided, will be populated with the field
paths of all missing required fields.
Returns:
True iff the specified message has all required fields set.
"""
# Performance is critical so we avoid HasField() and ListFields().
for field in required_fields:
if (field not in self._fields or
(field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
not self._fields[field]._is_present_in_parent)):
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
for field, value in list(self._fields.items()): # dict can change size!
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
if (field.message_type.has_options and
field.message_type.GetOptions().map_entry):
continue
for element in value:
if not element.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
elif value._is_present_in_parent and not value.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
return True
cls.IsInitialized = IsInitialized
def FindInitializationErrors(self):
"""Finds required fields which are not initialized.
Returns:
A list of strings. Each string is a path to an uninitialized field from
the top-level message, e.g. "foo.bar[5].baz".
"""
errors = [] # simplify things
for field in required_fields:
if not self.HasField(field.name):
errors.append(field.name)
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
name = "(%s)" % field.full_name
else:
name = field.name
if _IsMapField(field):
if _IsMessageMapField(field):
for key in value:
element = value[key]
prefix = "%s[%s]." % (name, key)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
# ScalarMaps can't have any initialization errors.
pass
elif field.label == _FieldDescriptor.LABEL_REPEATED:
for i in range(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
prefix = name + "."
sub_errors = value.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
return errors
cls.FindInitializationErrors = FindInitializationErrors | python | def _AddIsInitializedMethod(message_descriptor, cls):
"""Adds the IsInitialized and FindInitializationError methods to the
protocol message class."""
required_fields = [field for field in message_descriptor.fields
if field.label == _FieldDescriptor.LABEL_REQUIRED]
def IsInitialized(self, errors=None):
"""Checks if all required fields of a message are set.
Args:
errors: A list which, if provided, will be populated with the field
paths of all missing required fields.
Returns:
True iff the specified message has all required fields set.
"""
# Performance is critical so we avoid HasField() and ListFields().
for field in required_fields:
if (field not in self._fields or
(field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
not self._fields[field]._is_present_in_parent)):
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
for field, value in list(self._fields.items()): # dict can change size!
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
if (field.message_type.has_options and
field.message_type.GetOptions().map_entry):
continue
for element in value:
if not element.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
elif value._is_present_in_parent and not value.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
return True
cls.IsInitialized = IsInitialized
def FindInitializationErrors(self):
"""Finds required fields which are not initialized.
Returns:
A list of strings. Each string is a path to an uninitialized field from
the top-level message, e.g. "foo.bar[5].baz".
"""
errors = [] # simplify things
for field in required_fields:
if not self.HasField(field.name):
errors.append(field.name)
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
name = "(%s)" % field.full_name
else:
name = field.name
if _IsMapField(field):
if _IsMessageMapField(field):
for key in value:
element = value[key]
prefix = "%s[%s]." % (name, key)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
# ScalarMaps can't have any initialization errors.
pass
elif field.label == _FieldDescriptor.LABEL_REPEATED:
for i in range(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
prefix = name + "."
sub_errors = value.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
return errors
cls.FindInitializationErrors = FindInitializationErrors | [
"def",
"_AddIsInitializedMethod",
"(",
"message_descriptor",
",",
"cls",
")",
":",
"required_fields",
"=",
"[",
"field",
"for",
"field",
"in",
"message_descriptor",
".",
"fields",
"if",
"field",
".",
"label",
"==",
"_FieldDescriptor",
".",
"LABEL_REQUIRED",
"]",
... | Adds the IsInitialized and FindInitializationError methods to the
protocol message class. | [
"Adds",
"the",
"IsInitialized",
"and",
"FindInitializationError",
"methods",
"to",
"the",
"protocol",
"message",
"class",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1106-L1198 |
29,636 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _AddMessageMethods | def _AddMessageMethods(message_descriptor, cls):
"""Adds implementations of all Message methods to cls."""
_AddListFieldsMethod(message_descriptor, cls)
_AddHasFieldMethod(message_descriptor, cls)
_AddClearFieldMethod(message_descriptor, cls)
if message_descriptor.is_extendable:
_AddClearExtensionMethod(cls)
_AddHasExtensionMethod(cls)
_AddEqualsMethod(message_descriptor, cls)
_AddStrMethod(message_descriptor, cls)
_AddReprMethod(message_descriptor, cls)
_AddUnicodeMethod(message_descriptor, cls)
_AddByteSizeMethod(message_descriptor, cls)
_AddSerializeToStringMethod(message_descriptor, cls)
_AddSerializePartialToStringMethod(message_descriptor, cls)
_AddMergeFromStringMethod(message_descriptor, cls)
_AddIsInitializedMethod(message_descriptor, cls)
_AddMergeFromMethod(cls)
_AddWhichOneofMethod(message_descriptor, cls)
_AddReduceMethod(cls)
# Adds methods which do not depend on cls.
cls.Clear = _Clear
cls.DiscardUnknownFields = _DiscardUnknownFields
cls._SetListener = _SetListener | python | def _AddMessageMethods(message_descriptor, cls):
"""Adds implementations of all Message methods to cls."""
_AddListFieldsMethod(message_descriptor, cls)
_AddHasFieldMethod(message_descriptor, cls)
_AddClearFieldMethod(message_descriptor, cls)
if message_descriptor.is_extendable:
_AddClearExtensionMethod(cls)
_AddHasExtensionMethod(cls)
_AddEqualsMethod(message_descriptor, cls)
_AddStrMethod(message_descriptor, cls)
_AddReprMethod(message_descriptor, cls)
_AddUnicodeMethod(message_descriptor, cls)
_AddByteSizeMethod(message_descriptor, cls)
_AddSerializeToStringMethod(message_descriptor, cls)
_AddSerializePartialToStringMethod(message_descriptor, cls)
_AddMergeFromStringMethod(message_descriptor, cls)
_AddIsInitializedMethod(message_descriptor, cls)
_AddMergeFromMethod(cls)
_AddWhichOneofMethod(message_descriptor, cls)
_AddReduceMethod(cls)
# Adds methods which do not depend on cls.
cls.Clear = _Clear
cls.DiscardUnknownFields = _DiscardUnknownFields
cls._SetListener = _SetListener | [
"def",
"_AddMessageMethods",
"(",
"message_descriptor",
",",
"cls",
")",
":",
"_AddListFieldsMethod",
"(",
"message_descriptor",
",",
"cls",
")",
"_AddHasFieldMethod",
"(",
"message_descriptor",
",",
"cls",
")",
"_AddClearFieldMethod",
"(",
"message_descriptor",
",",
... | Adds implementations of all Message methods to cls. | [
"Adds",
"implementations",
"of",
"all",
"Message",
"methods",
"to",
"cls",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1295-L1318 |
29,637 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _AddPrivateHelperMethods | def _AddPrivateHelperMethods(message_descriptor, cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
# Note: Some callers check _cached_byte_size_dirty before calling
# _Modified() as an extra optimization. So, if this method is ever
# changed such that it does stuff even when _cached_byte_size_dirty is
# already true, the callers need to be updated.
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
def _UpdateOneofState(self, field):
"""Sets field as the active field in its containing oneof.
Will also delete currently active field in the oneof, if it is different
from the argument. Does not mark the message as modified.
"""
other_field = self._oneofs.setdefault(field.containing_oneof, field)
if other_field is not field:
del self._fields[other_field]
self._oneofs[field.containing_oneof] = field
cls._Modified = Modified
cls.SetInParent = Modified
cls._UpdateOneofState = _UpdateOneofState | python | def _AddPrivateHelperMethods(message_descriptor, cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
# Note: Some callers check _cached_byte_size_dirty before calling
# _Modified() as an extra optimization. So, if this method is ever
# changed such that it does stuff even when _cached_byte_size_dirty is
# already true, the callers need to be updated.
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
def _UpdateOneofState(self, field):
"""Sets field as the active field in its containing oneof.
Will also delete currently active field in the oneof, if it is different
from the argument. Does not mark the message as modified.
"""
other_field = self._oneofs.setdefault(field.containing_oneof, field)
if other_field is not field:
del self._fields[other_field]
self._oneofs[field.containing_oneof] = field
cls._Modified = Modified
cls.SetInParent = Modified
cls._UpdateOneofState = _UpdateOneofState | [
"def",
"_AddPrivateHelperMethods",
"(",
"message_descriptor",
",",
"cls",
")",
":",
"def",
"Modified",
"(",
"self",
")",
":",
"\"\"\"Sets the _cached_byte_size_dirty bit to true,\n and propagates this to our listener iff this was a state change.\n \"\"\"",
"# Note: Some callers ... | Adds implementation of private helper methods to cls. | [
"Adds",
"implementation",
"of",
"private",
"helper",
"methods",
"to",
"cls",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1321-L1352 |
29,638 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py | _OneofListener.Modified | def Modified(self):
"""Also updates the state of the containing oneof in the parent message."""
try:
self._parent_message_weakref._UpdateOneofState(self._field)
super(_OneofListener, self).Modified()
except ReferenceError:
pass | python | def Modified(self):
"""Also updates the state of the containing oneof in the parent message."""
try:
self._parent_message_weakref._UpdateOneofState(self._field)
super(_OneofListener, self).Modified()
except ReferenceError:
pass | [
"def",
"Modified",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_parent_message_weakref",
".",
"_UpdateOneofState",
"(",
"self",
".",
"_field",
")",
"super",
"(",
"_OneofListener",
",",
"self",
")",
".",
"Modified",
"(",
")",
"except",
"ReferenceError",
... | Also updates the state of the containing oneof in the parent message. | [
"Also",
"updates",
"the",
"state",
"of",
"the",
"containing",
"oneof",
"in",
"the",
"parent",
"message",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L1413-L1419 |
29,639 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py | EnumTypeWrapper.Name | def Name(self, number):
"""Returns a string containing the name of an enum value."""
if number in self._enum_type.values_by_number:
return self._enum_type.values_by_number[number].name
raise ValueError('Enum %s has no name defined for value %d' % (
self._enum_type.name, number)) | python | def Name(self, number):
"""Returns a string containing the name of an enum value."""
if number in self._enum_type.values_by_number:
return self._enum_type.values_by_number[number].name
raise ValueError('Enum %s has no name defined for value %d' % (
self._enum_type.name, number)) | [
"def",
"Name",
"(",
"self",
",",
"number",
")",
":",
"if",
"number",
"in",
"self",
".",
"_enum_type",
".",
"values_by_number",
":",
"return",
"self",
".",
"_enum_type",
".",
"values_by_number",
"[",
"number",
"]",
".",
"name",
"raise",
"ValueError",
"(",
... | Returns a string containing the name of an enum value. | [
"Returns",
"a",
"string",
"containing",
"the",
"name",
"of",
"an",
"enum",
"value",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py#L51-L56 |
29,640 | apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py | EnumTypeWrapper.Value | def Value(self, name):
"""Returns the value coresponding to the given enum name."""
if name in self._enum_type.values_by_name:
return self._enum_type.values_by_name[name].number
raise ValueError('Enum %s has no value defined for name %s' % (
self._enum_type.name, name)) | python | def Value(self, name):
"""Returns the value coresponding to the given enum name."""
if name in self._enum_type.values_by_name:
return self._enum_type.values_by_name[name].number
raise ValueError('Enum %s has no value defined for name %s' % (
self._enum_type.name, name)) | [
"def",
"Value",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"in",
"self",
".",
"_enum_type",
".",
"values_by_name",
":",
"return",
"self",
".",
"_enum_type",
".",
"values_by_name",
"[",
"name",
"]",
".",
"number",
"raise",
"ValueError",
"(",
"'Enum... | Returns the value coresponding to the given enum name. | [
"Returns",
"the",
"value",
"coresponding",
"to",
"the",
"given",
"enum",
"name",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/enum_type_wrapper.py#L58-L63 |
29,641 | apple/turicreate | src/unity/python/turicreate/toolkits/_mps_utils.py | _load_tcmps_lib | def _load_tcmps_lib():
"""
Load global singleton of tcmps lib handler.
This function is used not used at the top level, so
that the shared library is loaded lazily only when needed.
"""
global _g_TCMPS_LIB
if _g_TCMPS_LIB is None:
# This library requires macOS 10.14 or above
if _mac_ver() < (10, 14):
return None
# The symbols defined in libtcmps are now exposed directly by
# libunity_shared. Eventually the object_detector and
# activity_classifier toolkits will use the same Python/C++ bridge as
# the other toolkits, and this usage of ctypes will go away.
file_dir = _os.path.dirname(__file__)
lib_path = _os.path.abspath(_os.path.join(file_dir, _os.pardir, 'libunity_shared.dylib'))
try:
_g_TCMPS_LIB = _ctypes.CDLL(lib_path, _ctypes.RTLD_LOCAL)
except OSError:
pass
return _g_TCMPS_LIB | python | def _load_tcmps_lib():
"""
Load global singleton of tcmps lib handler.
This function is used not used at the top level, so
that the shared library is loaded lazily only when needed.
"""
global _g_TCMPS_LIB
if _g_TCMPS_LIB is None:
# This library requires macOS 10.14 or above
if _mac_ver() < (10, 14):
return None
# The symbols defined in libtcmps are now exposed directly by
# libunity_shared. Eventually the object_detector and
# activity_classifier toolkits will use the same Python/C++ bridge as
# the other toolkits, and this usage of ctypes will go away.
file_dir = _os.path.dirname(__file__)
lib_path = _os.path.abspath(_os.path.join(file_dir, _os.pardir, 'libunity_shared.dylib'))
try:
_g_TCMPS_LIB = _ctypes.CDLL(lib_path, _ctypes.RTLD_LOCAL)
except OSError:
pass
return _g_TCMPS_LIB | [
"def",
"_load_tcmps_lib",
"(",
")",
":",
"global",
"_g_TCMPS_LIB",
"if",
"_g_TCMPS_LIB",
"is",
"None",
":",
"# This library requires macOS 10.14 or above",
"if",
"_mac_ver",
"(",
")",
"<",
"(",
"10",
",",
"14",
")",
":",
"return",
"None",
"# The symbols defined in... | Load global singleton of tcmps lib handler.
This function is used not used at the top level, so
that the shared library is loaded lazily only when needed. | [
"Load",
"global",
"singleton",
"of",
"tcmps",
"lib",
"handler",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L141-L164 |
29,642 | apple/turicreate | src/unity/python/turicreate/toolkits/_mps_utils.py | mps_device_name | def mps_device_name():
"""
Returns name of MPS device that will be used, else None.
"""
lib = _load_tcmps_lib()
if lib is None:
return None
n = 256
c_name = (_ctypes.c_char * n)()
ret = lib.TCMPSMetalDeviceName(_ctypes.byref(c_name), _ctypes.c_int32(n))
if ret == 0:
return _decode_bytes_to_native_string(c_name.value)
else:
return None | python | def mps_device_name():
"""
Returns name of MPS device that will be used, else None.
"""
lib = _load_tcmps_lib()
if lib is None:
return None
n = 256
c_name = (_ctypes.c_char * n)()
ret = lib.TCMPSMetalDeviceName(_ctypes.byref(c_name), _ctypes.c_int32(n))
if ret == 0:
return _decode_bytes_to_native_string(c_name.value)
else:
return None | [
"def",
"mps_device_name",
"(",
")",
":",
"lib",
"=",
"_load_tcmps_lib",
"(",
")",
"if",
"lib",
"is",
"None",
":",
"return",
"None",
"n",
"=",
"256",
"c_name",
"=",
"(",
"_ctypes",
".",
"c_char",
"*",
"n",
")",
"(",
")",
"ret",
"=",
"lib",
".",
"T... | Returns name of MPS device that will be used, else None. | [
"Returns",
"name",
"of",
"MPS",
"device",
"that",
"will",
"be",
"used",
"else",
"None",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L188-L202 |
29,643 | apple/turicreate | src/unity/python/turicreate/toolkits/_mps_utils.py | mps_device_memory_limit | def mps_device_memory_limit():
"""
Returns the memory size in bytes that can be effectively allocated on the
MPS device that will be used, or None if no suitable device is available.
"""
lib = _load_tcmps_lib()
if lib is None:
return None
c_size = _ctypes.c_uint64()
ret = lib.TCMPSMetalDeviceMemoryLimit(_ctypes.byref(c_size))
return c_size.value if ret == 0 else None | python | def mps_device_memory_limit():
"""
Returns the memory size in bytes that can be effectively allocated on the
MPS device that will be used, or None if no suitable device is available.
"""
lib = _load_tcmps_lib()
if lib is None:
return None
c_size = _ctypes.c_uint64()
ret = lib.TCMPSMetalDeviceMemoryLimit(_ctypes.byref(c_size))
return c_size.value if ret == 0 else None | [
"def",
"mps_device_memory_limit",
"(",
")",
":",
"lib",
"=",
"_load_tcmps_lib",
"(",
")",
"if",
"lib",
"is",
"None",
":",
"return",
"None",
"c_size",
"=",
"_ctypes",
".",
"c_uint64",
"(",
")",
"ret",
"=",
"lib",
".",
"TCMPSMetalDeviceMemoryLimit",
"(",
"_c... | Returns the memory size in bytes that can be effectively allocated on the
MPS device that will be used, or None if no suitable device is available. | [
"Returns",
"the",
"memory",
"size",
"in",
"bytes",
"that",
"can",
"be",
"effectively",
"allocated",
"on",
"the",
"MPS",
"device",
"that",
"will",
"be",
"used",
"or",
"None",
"if",
"no",
"suitable",
"device",
"is",
"available",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L205-L216 |
29,644 | apple/turicreate | src/unity/python/turicreate/toolkits/_mps_utils.py | MpsFloatArray.shape | def shape(self):
"""Copy the shape from TCMPS as a new numpy ndarray."""
# Create C variables that will serve as out parameters for TCMPS.
shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)() # size_t* shape_ptr
dim = _ctypes.c_size_t() # size_t dim
# Obtain pointer into memory owned by the C++ object self.handle.
status_code = self._LIB.TCMPSGetFloatArrayShape(
self.handle, _ctypes.byref(shape_ptr), _ctypes.byref(dim))
assert status_code == 0, "Error calling TCMPSGetFloatArrayShape"
return _shape_tuple_from_ctypes(shape_ptr, dim) | python | def shape(self):
"""Copy the shape from TCMPS as a new numpy ndarray."""
# Create C variables that will serve as out parameters for TCMPS.
shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)() # size_t* shape_ptr
dim = _ctypes.c_size_t() # size_t dim
# Obtain pointer into memory owned by the C++ object self.handle.
status_code = self._LIB.TCMPSGetFloatArrayShape(
self.handle, _ctypes.byref(shape_ptr), _ctypes.byref(dim))
assert status_code == 0, "Error calling TCMPSGetFloatArrayShape"
return _shape_tuple_from_ctypes(shape_ptr, dim) | [
"def",
"shape",
"(",
"self",
")",
":",
"# Create C variables that will serve as out parameters for TCMPS.",
"shape_ptr",
"=",
"_ctypes",
".",
"POINTER",
"(",
"_ctypes",
".",
"c_size_t",
")",
"(",
")",
"# size_t* shape_ptr",
"dim",
"=",
"_ctypes",
".",
"c_size_t",
"(... | Copy the shape from TCMPS as a new numpy ndarray. | [
"Copy",
"the",
"shape",
"from",
"TCMPS",
"as",
"a",
"new",
"numpy",
"ndarray",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L314-L326 |
29,645 | apple/turicreate | src/unity/python/turicreate/toolkits/_mps_utils.py | MpsFloatArray.asnumpy | def asnumpy(self):
"""Copy the data from TCMPS into a new numpy ndarray"""
# Create C variables that will serve as out parameters for TCMPS.
data_ptr = _ctypes.POINTER(_ctypes.c_float)() # float* data_ptr
shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)() # size_t* shape_ptr
dim = _ctypes.c_size_t() # size_t dim
# Obtain pointers into memory owned by the C++ object self.handle.
# Note that this may trigger synchronization with another thread
# producing the data.
status_code = self._LIB.TCMPSReadFloatArray(
self.handle, _ctypes.byref(data_ptr), _ctypes.byref(shape_ptr),
_ctypes.byref(dim))
assert status_code == 0, "Error calling TCMPSReadFloatArray"
return _numpy_array_from_ctypes(data_ptr, shape_ptr, dim) | python | def asnumpy(self):
"""Copy the data from TCMPS into a new numpy ndarray"""
# Create C variables that will serve as out parameters for TCMPS.
data_ptr = _ctypes.POINTER(_ctypes.c_float)() # float* data_ptr
shape_ptr = _ctypes.POINTER(_ctypes.c_size_t)() # size_t* shape_ptr
dim = _ctypes.c_size_t() # size_t dim
# Obtain pointers into memory owned by the C++ object self.handle.
# Note that this may trigger synchronization with another thread
# producing the data.
status_code = self._LIB.TCMPSReadFloatArray(
self.handle, _ctypes.byref(data_ptr), _ctypes.byref(shape_ptr),
_ctypes.byref(dim))
assert status_code == 0, "Error calling TCMPSReadFloatArray"
return _numpy_array_from_ctypes(data_ptr, shape_ptr, dim) | [
"def",
"asnumpy",
"(",
"self",
")",
":",
"# Create C variables that will serve as out parameters for TCMPS.",
"data_ptr",
"=",
"_ctypes",
".",
"POINTER",
"(",
"_ctypes",
".",
"c_float",
")",
"(",
")",
"# float* data_ptr",
"shape_ptr",
"=",
"_ctypes",
".",
"POINTER",
... | Copy the data from TCMPS into a new numpy ndarray | [
"Copy",
"the",
"data",
"from",
"TCMPS",
"into",
"a",
"new",
"numpy",
"ndarray"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_mps_utils.py#L328-L344 |
29,646 | Miserlou/Zappa | zappa/asynchronous.py | route_sns_task | def route_sns_task(event, context):
"""
Gets SNS Message, deserialises the message,
imports the function, calls the function with args
"""
record = event['Records'][0]
message = json.loads(
record['Sns']['Message']
)
return run_message(message) | python | def route_sns_task(event, context):
"""
Gets SNS Message, deserialises the message,
imports the function, calls the function with args
"""
record = event['Records'][0]
message = json.loads(
record['Sns']['Message']
)
return run_message(message) | [
"def",
"route_sns_task",
"(",
"event",
",",
"context",
")",
":",
"record",
"=",
"event",
"[",
"'Records'",
"]",
"[",
"0",
"]",
"message",
"=",
"json",
".",
"loads",
"(",
"record",
"[",
"'Sns'",
"]",
"[",
"'Message'",
"]",
")",
"return",
"run_message",
... | Gets SNS Message, deserialises the message,
imports the function, calls the function with args | [
"Gets",
"SNS",
"Message",
"deserialises",
"the",
"message",
"imports",
"the",
"function",
"calls",
"the",
"function",
"with",
"args"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L275-L284 |
29,647 | Miserlou/Zappa | zappa/asynchronous.py | task | def task(*args, **kwargs):
"""Async task decorator so that running
Args:
func (function): the function to be wrapped
Further requirements:
func must be an independent top-level function.
i.e. not a class method or an anonymous function
service (str): either 'lambda' or 'sns'
remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
remote_aws_region (str): the name of a remote region to make lambda/sns calls against
Returns:
A replacement function that dispatches func() to
run asynchronously through the service in question
"""
func = None
if len(args) == 1 and callable(args[0]):
func = args[0]
if not kwargs: # Default Values
service = 'lambda'
lambda_function_name_arg = None
aws_region_arg = None
else: # Arguments were passed
service = kwargs.get('service', 'lambda')
lambda_function_name_arg = kwargs.get('remote_aws_lambda_function_name')
aws_region_arg = kwargs.get('remote_aws_region')
capture_response = kwargs.get('capture_response', False)
def func_wrapper(func):
task_path = get_func_task_path(func)
@wraps(func)
def _run_async(*args, **kwargs):
"""
This is the wrapping async function that replaces the function
that is decorated with @task.
Args:
These are just passed through to @task's func
Assuming a valid service is passed to task() and it is run
inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
it dispatches the function to be run through the service variable.
Otherwise, it runs the task synchronously.
Returns:
In async mode, the object returned includes state of the dispatch.
For instance
When outside of Lambda, the func passed to @task is run and we
return the actual value.
"""
lambda_function_name = lambda_function_name_arg or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')
aws_region = aws_region_arg or os.environ.get('AWS_REGION')
if (service in ASYNC_CLASSES) and (lambda_function_name):
send_result = ASYNC_CLASSES[service](lambda_function_name=lambda_function_name,
aws_region=aws_region,
capture_response=capture_response).send(task_path, args, kwargs)
return send_result
else:
return func(*args, **kwargs)
update_wrapper(_run_async, func)
_run_async.service = service
_run_async.sync = func
return _run_async
return func_wrapper(func) if func else func_wrapper | python | def task(*args, **kwargs):
"""Async task decorator so that running
Args:
func (function): the function to be wrapped
Further requirements:
func must be an independent top-level function.
i.e. not a class method or an anonymous function
service (str): either 'lambda' or 'sns'
remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
remote_aws_region (str): the name of a remote region to make lambda/sns calls against
Returns:
A replacement function that dispatches func() to
run asynchronously through the service in question
"""
func = None
if len(args) == 1 and callable(args[0]):
func = args[0]
if not kwargs: # Default Values
service = 'lambda'
lambda_function_name_arg = None
aws_region_arg = None
else: # Arguments were passed
service = kwargs.get('service', 'lambda')
lambda_function_name_arg = kwargs.get('remote_aws_lambda_function_name')
aws_region_arg = kwargs.get('remote_aws_region')
capture_response = kwargs.get('capture_response', False)
def func_wrapper(func):
task_path = get_func_task_path(func)
@wraps(func)
def _run_async(*args, **kwargs):
"""
This is the wrapping async function that replaces the function
that is decorated with @task.
Args:
These are just passed through to @task's func
Assuming a valid service is passed to task() and it is run
inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists),
it dispatches the function to be run through the service variable.
Otherwise, it runs the task synchronously.
Returns:
In async mode, the object returned includes state of the dispatch.
For instance
When outside of Lambda, the func passed to @task is run and we
return the actual value.
"""
lambda_function_name = lambda_function_name_arg or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')
aws_region = aws_region_arg or os.environ.get('AWS_REGION')
if (service in ASYNC_CLASSES) and (lambda_function_name):
send_result = ASYNC_CLASSES[service](lambda_function_name=lambda_function_name,
aws_region=aws_region,
capture_response=capture_response).send(task_path, args, kwargs)
return send_result
else:
return func(*args, **kwargs)
update_wrapper(_run_async, func)
_run_async.service = service
_run_async.sync = func
return _run_async
return func_wrapper(func) if func else func_wrapper | [
"def",
"task",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"func",
"=",
"None",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"callable",
"(",
"args",
"[",
"0",
"]",
")",
":",
"func",
"=",
"args",
"[",
"0",
"]",
"if",
"not",
"kw... | Async task decorator so that running
Args:
func (function): the function to be wrapped
Further requirements:
func must be an independent top-level function.
i.e. not a class method or an anonymous function
service (str): either 'lambda' or 'sns'
remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task
remote_aws_region (str): the name of a remote region to make lambda/sns calls against
Returns:
A replacement function that dispatches func() to
run asynchronously through the service in question | [
"Async",
"task",
"decorator",
"so",
"that",
"running"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L364-L439 |
29,648 | Miserlou/Zappa | zappa/asynchronous.py | get_func_task_path | def get_func_task_path(func):
"""
Format the modular task path for a function via inspection.
"""
module_path = inspect.getmodule(func).__name__
task_path = '{module_path}.{func_name}'.format(
module_path=module_path,
func_name=func.__name__
)
return task_path | python | def get_func_task_path(func):
"""
Format the modular task path for a function via inspection.
"""
module_path = inspect.getmodule(func).__name__
task_path = '{module_path}.{func_name}'.format(
module_path=module_path,
func_name=func.__name__
)
return task_path | [
"def",
"get_func_task_path",
"(",
"func",
")",
":",
"module_path",
"=",
"inspect",
".",
"getmodule",
"(",
"func",
")",
".",
"__name__",
"task_path",
"=",
"'{module_path}.{func_name}'",
".",
"format",
"(",
"module_path",
"=",
"module_path",
",",
"func_name",
"=",... | Format the modular task path for a function via inspection. | [
"Format",
"the",
"modular",
"task",
"path",
"for",
"a",
"function",
"via",
"inspection",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L464-L473 |
29,649 | Miserlou/Zappa | zappa/asynchronous.py | get_async_response | def get_async_response(response_id):
"""
Get the response from the async table
"""
response = DYNAMODB_CLIENT.get_item(
TableName=ASYNC_RESPONSE_TABLE,
Key={'id': {'S': str(response_id)}}
)
if 'Item' not in response:
return None
return {
'status': response['Item']['async_status']['S'],
'response': json.loads(response['Item']['async_response']['S']),
} | python | def get_async_response(response_id):
"""
Get the response from the async table
"""
response = DYNAMODB_CLIENT.get_item(
TableName=ASYNC_RESPONSE_TABLE,
Key={'id': {'S': str(response_id)}}
)
if 'Item' not in response:
return None
return {
'status': response['Item']['async_status']['S'],
'response': json.loads(response['Item']['async_response']['S']),
} | [
"def",
"get_async_response",
"(",
"response_id",
")",
":",
"response",
"=",
"DYNAMODB_CLIENT",
".",
"get_item",
"(",
"TableName",
"=",
"ASYNC_RESPONSE_TABLE",
",",
"Key",
"=",
"{",
"'id'",
":",
"{",
"'S'",
":",
"str",
"(",
"response_id",
")",
"}",
"}",
")"... | Get the response from the async table | [
"Get",
"the",
"response",
"from",
"the",
"async",
"table"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L476-L490 |
29,650 | Miserlou/Zappa | zappa/asynchronous.py | LambdaAsyncResponse.send | def send(self, task_path, args, kwargs):
"""
Create the message object and pass it to the actual sender.
"""
message = {
'task_path': task_path,
'capture_response': self.capture_response,
'response_id': self.response_id,
'args': args,
'kwargs': kwargs
}
self._send(message)
return self | python | def send(self, task_path, args, kwargs):
"""
Create the message object and pass it to the actual sender.
"""
message = {
'task_path': task_path,
'capture_response': self.capture_response,
'response_id': self.response_id,
'args': args,
'kwargs': kwargs
}
self._send(message)
return self | [
"def",
"send",
"(",
"self",
",",
"task_path",
",",
"args",
",",
"kwargs",
")",
":",
"message",
"=",
"{",
"'task_path'",
":",
"task_path",
",",
"'capture_response'",
":",
"self",
".",
"capture_response",
",",
"'response_id'",
":",
"self",
".",
"response_id",
... | Create the message object and pass it to the actual sender. | [
"Create",
"the",
"message",
"object",
"and",
"pass",
"it",
"to",
"the",
"actual",
"sender",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L162-L174 |
29,651 | Miserlou/Zappa | zappa/asynchronous.py | LambdaAsyncResponse._send | def _send(self, message):
"""
Given a message, directly invoke the lamdba function for this task.
"""
message['command'] = 'zappa.asynchronous.route_lambda_task'
payload = json.dumps(message).encode('utf-8')
if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT: # pragma: no cover
raise AsyncException("Payload too large for async Lambda call")
self.response = self.client.invoke(
FunctionName=self.lambda_function_name,
InvocationType='Event', #makes the call async
Payload=payload
)
self.sent = (self.response.get('StatusCode', 0) == 202) | python | def _send(self, message):
"""
Given a message, directly invoke the lamdba function for this task.
"""
message['command'] = 'zappa.asynchronous.route_lambda_task'
payload = json.dumps(message).encode('utf-8')
if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT: # pragma: no cover
raise AsyncException("Payload too large for async Lambda call")
self.response = self.client.invoke(
FunctionName=self.lambda_function_name,
InvocationType='Event', #makes the call async
Payload=payload
)
self.sent = (self.response.get('StatusCode', 0) == 202) | [
"def",
"_send",
"(",
"self",
",",
"message",
")",
":",
"message",
"[",
"'command'",
"]",
"=",
"'zappa.asynchronous.route_lambda_task'",
"payload",
"=",
"json",
".",
"dumps",
"(",
"message",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"len",
"(",
"payloa... | Given a message, directly invoke the lamdba function for this task. | [
"Given",
"a",
"message",
"directly",
"invoke",
"the",
"lamdba",
"function",
"for",
"this",
"task",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L176-L189 |
29,652 | Miserlou/Zappa | zappa/asynchronous.py | SnsAsyncResponse._send | def _send(self, message):
"""
Given a message, publish to this topic.
"""
message['command'] = 'zappa.asynchronous.route_sns_task'
payload = json.dumps(message).encode('utf-8')
if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT: # pragma: no cover
raise AsyncException("Payload too large for SNS")
self.response = self.client.publish(
TargetArn=self.arn,
Message=payload
)
self.sent = self.response.get('MessageId') | python | def _send(self, message):
"""
Given a message, publish to this topic.
"""
message['command'] = 'zappa.asynchronous.route_sns_task'
payload = json.dumps(message).encode('utf-8')
if len(payload) > LAMBDA_ASYNC_PAYLOAD_LIMIT: # pragma: no cover
raise AsyncException("Payload too large for SNS")
self.response = self.client.publish(
TargetArn=self.arn,
Message=payload
)
self.sent = self.response.get('MessageId') | [
"def",
"_send",
"(",
"self",
",",
"message",
")",
":",
"message",
"[",
"'command'",
"]",
"=",
"'zappa.asynchronous.route_sns_task'",
"payload",
"=",
"json",
".",
"dumps",
"(",
"message",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"len",
"(",
"payload",... | Given a message, publish to this topic. | [
"Given",
"a",
"message",
"publish",
"to",
"this",
"topic",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/asynchronous.py#L242-L254 |
29,653 | Miserlou/Zappa | zappa/utilities.py | parse_s3_url | def parse_s3_url(url):
"""
Parses S3 URL.
Returns bucket (domain) and file (full path).
"""
bucket = ''
path = ''
if url:
result = urlparse(url)
bucket = result.netloc
path = result.path.strip('/')
return bucket, path | python | def parse_s3_url(url):
"""
Parses S3 URL.
Returns bucket (domain) and file (full path).
"""
bucket = ''
path = ''
if url:
result = urlparse(url)
bucket = result.netloc
path = result.path.strip('/')
return bucket, path | [
"def",
"parse_s3_url",
"(",
"url",
")",
":",
"bucket",
"=",
"''",
"path",
"=",
"''",
"if",
"url",
":",
"result",
"=",
"urlparse",
"(",
"url",
")",
"bucket",
"=",
"result",
".",
"netloc",
"path",
"=",
"result",
".",
"path",
".",
"strip",
"(",
"'/'",... | Parses S3 URL.
Returns bucket (domain) and file (full path). | [
"Parses",
"S3",
"URL",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L67-L79 |
29,654 | Miserlou/Zappa | zappa/utilities.py | string_to_timestamp | def string_to_timestamp(timestring):
"""
Accepts a str, returns an int timestamp.
"""
ts = None
# Uses an extended version of Go's duration string.
try:
delta = durationpy.from_str(timestring);
past = datetime.datetime.utcnow() - delta
ts = calendar.timegm(past.timetuple())
return ts
except Exception as e:
pass
if ts:
return ts
# else:
# print("Unable to parse timestring.")
return 0 | python | def string_to_timestamp(timestring):
"""
Accepts a str, returns an int timestamp.
"""
ts = None
# Uses an extended version of Go's duration string.
try:
delta = durationpy.from_str(timestring);
past = datetime.datetime.utcnow() - delta
ts = calendar.timegm(past.timetuple())
return ts
except Exception as e:
pass
if ts:
return ts
# else:
# print("Unable to parse timestring.")
return 0 | [
"def",
"string_to_timestamp",
"(",
"timestring",
")",
":",
"ts",
"=",
"None",
"# Uses an extended version of Go's duration string.",
"try",
":",
"delta",
"=",
"durationpy",
".",
"from_str",
"(",
"timestring",
")",
"past",
"=",
"datetime",
".",
"datetime",
".",
"ut... | Accepts a str, returns an int timestamp. | [
"Accepts",
"a",
"str",
"returns",
"an",
"int",
"timestamp",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L91-L111 |
29,655 | Miserlou/Zappa | zappa/utilities.py | detect_django_settings | def detect_django_settings():
"""
Automatically try to discover Django settings files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, '*settings.py'):
full = os.path.join(root, filename)
if 'site-packages' in full:
continue
full = os.path.join(root, filename)
package_path = full.replace(os.getcwd(), '')
package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
matches.append(package_module)
return matches | python | def detect_django_settings():
"""
Automatically try to discover Django settings files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, '*settings.py'):
full = os.path.join(root, filename)
if 'site-packages' in full:
continue
full = os.path.join(root, filename)
package_path = full.replace(os.getcwd(), '')
package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
matches.append(package_module)
return matches | [
"def",
"detect_django_settings",
"(",
")",
":",
"matches",
"=",
"[",
"]",
"for",
"root",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
":",
"for",
"filename",
"in",
"fnmatch",
".",
"filter",
"(",... | Automatically try to discover Django settings files,
return them as relative module paths. | [
"Automatically",
"try",
"to",
"discover",
"Django",
"settings",
"files",
"return",
"them",
"as",
"relative",
"module",
"paths",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L117-L134 |
29,656 | Miserlou/Zappa | zappa/utilities.py | detect_flask_apps | def detect_flask_apps():
"""
Automatically try to discover Flask apps files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, '*.py'):
full = os.path.join(root, filename)
if 'site-packages' in full:
continue
full = os.path.join(root, filename)
with io.open(full, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
app = None
# Kind of janky..
if '= Flask(' in line:
app = line.split('= Flask(')[0].strip()
if '=Flask(' in line:
app = line.split('=Flask(')[0].strip()
if not app:
continue
package_path = full.replace(os.getcwd(), '')
package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
app_module = package_module + '.' + app
matches.append(app_module)
return matches | python | def detect_flask_apps():
"""
Automatically try to discover Flask apps files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, '*.py'):
full = os.path.join(root, filename)
if 'site-packages' in full:
continue
full = os.path.join(root, filename)
with io.open(full, 'r', encoding='utf-8') as f:
lines = f.readlines()
for line in lines:
app = None
# Kind of janky..
if '= Flask(' in line:
app = line.split('= Flask(')[0].strip()
if '=Flask(' in line:
app = line.split('=Flask(')[0].strip()
if not app:
continue
package_path = full.replace(os.getcwd(), '')
package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
app_module = package_module + '.' + app
matches.append(app_module)
return matches | [
"def",
"detect_flask_apps",
"(",
")",
":",
"matches",
"=",
"[",
"]",
"for",
"root",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
":",
"for",
"filename",
"in",
"fnmatch",
".",
"filter",
"(",
"f... | Automatically try to discover Flask apps files,
return them as relative module paths. | [
"Automatically",
"try",
"to",
"discover",
"Flask",
"apps",
"files",
"return",
"them",
"as",
"relative",
"module",
"paths",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L136-L171 |
29,657 | Miserlou/Zappa | zappa/utilities.py | check_new_version_available | def check_new_version_available(this_version):
"""
Checks if a newer version of Zappa is available.
Returns True is updateable, else False.
"""
import requests
pypi_url = 'https://pypi.python.org/pypi/Zappa/json'
resp = requests.get(pypi_url, timeout=1.5)
top_version = resp.json()['info']['version']
return this_version != top_version | python | def check_new_version_available(this_version):
"""
Checks if a newer version of Zappa is available.
Returns True is updateable, else False.
"""
import requests
pypi_url = 'https://pypi.python.org/pypi/Zappa/json'
resp = requests.get(pypi_url, timeout=1.5)
top_version = resp.json()['info']['version']
return this_version != top_version | [
"def",
"check_new_version_available",
"(",
"this_version",
")",
":",
"import",
"requests",
"pypi_url",
"=",
"'https://pypi.python.org/pypi/Zappa/json'",
"resp",
"=",
"requests",
".",
"get",
"(",
"pypi_url",
",",
"timeout",
"=",
"1.5",
")",
"top_version",
"=",
"resp"... | Checks if a newer version of Zappa is available.
Returns True is updateable, else False. | [
"Checks",
"if",
"a",
"newer",
"version",
"of",
"Zappa",
"is",
"available",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L442-L455 |
29,658 | Miserlou/Zappa | zappa/utilities.py | conflicts_with_a_neighbouring_module | def conflicts_with_a_neighbouring_module(directory_path):
"""
Checks if a directory lies in the same directory as a .py file with the same name.
"""
parent_dir_path, current_dir_name = os.path.split(os.path.normpath(directory_path))
neighbours = os.listdir(parent_dir_path)
conflicting_neighbour_filename = current_dir_name+'.py'
return conflicting_neighbour_filename in neighbours | python | def conflicts_with_a_neighbouring_module(directory_path):
"""
Checks if a directory lies in the same directory as a .py file with the same name.
"""
parent_dir_path, current_dir_name = os.path.split(os.path.normpath(directory_path))
neighbours = os.listdir(parent_dir_path)
conflicting_neighbour_filename = current_dir_name+'.py'
return conflicting_neighbour_filename in neighbours | [
"def",
"conflicts_with_a_neighbouring_module",
"(",
"directory_path",
")",
":",
"parent_dir_path",
",",
"current_dir_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"directory_path",
")",
")",
"neighbours",
"=",
"os",
... | Checks if a directory lies in the same directory as a .py file with the same name. | [
"Checks",
"if",
"a",
"directory",
"lies",
"in",
"the",
"same",
"directory",
"as",
"a",
".",
"py",
"file",
"with",
"the",
"same",
"name",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/utilities.py#L509-L516 |
29,659 | Miserlou/Zappa | zappa/wsgi.py | common_log | def common_log(environ, response, response_time=None):
"""
Given the WSGI environ and the response,
log this event in Common Log Format.
"""
logger = logging.getLogger()
if response_time:
formatter = ApacheFormatter(with_response_time=True)
try:
log_entry = formatter(response.status_code, environ,
len(response.content), rt_us=response_time)
except TypeError:
# Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
log_entry = formatter(response.status_code, environ,
len(response.content), rt_ms=response_time)
else:
formatter = ApacheFormatter(with_response_time=False)
log_entry = formatter(response.status_code, environ,
len(response.content))
logger.info(log_entry)
return log_entry | python | def common_log(environ, response, response_time=None):
"""
Given the WSGI environ and the response,
log this event in Common Log Format.
"""
logger = logging.getLogger()
if response_time:
formatter = ApacheFormatter(with_response_time=True)
try:
log_entry = formatter(response.status_code, environ,
len(response.content), rt_us=response_time)
except TypeError:
# Upstream introduced a very annoying breaking change on the rt_ms/rt_us kwarg.
log_entry = formatter(response.status_code, environ,
len(response.content), rt_ms=response_time)
else:
formatter = ApacheFormatter(with_response_time=False)
log_entry = formatter(response.status_code, environ,
len(response.content))
logger.info(log_entry)
return log_entry | [
"def",
"common_log",
"(",
"environ",
",",
"response",
",",
"response_time",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"if",
"response_time",
":",
"formatter",
"=",
"ApacheFormatter",
"(",
"with_response_time",
"=",
"True",
... | Given the WSGI environ and the response,
log this event in Common Log Format. | [
"Given",
"the",
"WSGI",
"environ",
"and",
"the",
"response",
"log",
"this",
"event",
"in",
"Common",
"Log",
"Format",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/wsgi.py#L171-L196 |
29,660 | Miserlou/Zappa | zappa/handler.py | LambdaHandler.load_remote_settings | def load_remote_settings(self, remote_bucket, remote_file):
"""
Attempt to read a file from s3 containing a flat json object. Adds each
key->value pair as environment variables. Helpful for keeping
sensitiZve or stage-specific configuration variables in s3 instead of
version control.
"""
if not self.session:
boto_session = boto3.Session()
else:
boto_session = self.session
s3 = boto_session.resource('s3')
try:
remote_env_object = s3.Object(remote_bucket, remote_file).get()
except Exception as e: # pragma: no cover
# catch everything aws might decide to raise
print('Could not load remote settings file.', e)
return
try:
content = remote_env_object['Body'].read()
except Exception as e: # pragma: no cover
# catch everything aws might decide to raise
print('Exception while reading remote settings file.', e)
return
try:
settings_dict = json.loads(content)
except (ValueError, TypeError): # pragma: no cover
print('Failed to parse remote settings!')
return
# add each key-value to environment - overwrites existing keys!
for key, value in settings_dict.items():
if self.settings.LOG_LEVEL == "DEBUG":
print('Adding {} -> {} to environment'.format(
key,
value
))
# Environment variable keys can't be Unicode
# https://github.com/Miserlou/Zappa/issues/604
try:
os.environ[str(key)] = value
except Exception:
if self.settings.LOG_LEVEL == "DEBUG":
print("Environment variable keys must be non-unicode!") | python | def load_remote_settings(self, remote_bucket, remote_file):
"""
Attempt to read a file from s3 containing a flat json object. Adds each
key->value pair as environment variables. Helpful for keeping
sensitiZve or stage-specific configuration variables in s3 instead of
version control.
"""
if not self.session:
boto_session = boto3.Session()
else:
boto_session = self.session
s3 = boto_session.resource('s3')
try:
remote_env_object = s3.Object(remote_bucket, remote_file).get()
except Exception as e: # pragma: no cover
# catch everything aws might decide to raise
print('Could not load remote settings file.', e)
return
try:
content = remote_env_object['Body'].read()
except Exception as e: # pragma: no cover
# catch everything aws might decide to raise
print('Exception while reading remote settings file.', e)
return
try:
settings_dict = json.loads(content)
except (ValueError, TypeError): # pragma: no cover
print('Failed to parse remote settings!')
return
# add each key-value to environment - overwrites existing keys!
for key, value in settings_dict.items():
if self.settings.LOG_LEVEL == "DEBUG":
print('Adding {} -> {} to environment'.format(
key,
value
))
# Environment variable keys can't be Unicode
# https://github.com/Miserlou/Zappa/issues/604
try:
os.environ[str(key)] = value
except Exception:
if self.settings.LOG_LEVEL == "DEBUG":
print("Environment variable keys must be non-unicode!") | [
"def",
"load_remote_settings",
"(",
"self",
",",
"remote_bucket",
",",
"remote_file",
")",
":",
"if",
"not",
"self",
".",
"session",
":",
"boto_session",
"=",
"boto3",
".",
"Session",
"(",
")",
"else",
":",
"boto_session",
"=",
"self",
".",
"session",
"s3"... | Attempt to read a file from s3 containing a flat json object. Adds each
key->value pair as environment variables. Helpful for keeping
sensitiZve or stage-specific configuration variables in s3 instead of
version control. | [
"Attempt",
"to",
"read",
"a",
"file",
"from",
"s3",
"containing",
"a",
"flat",
"json",
"object",
".",
"Adds",
"each",
"key",
"-",
">",
"value",
"pair",
"as",
"environment",
"variables",
".",
"Helpful",
"for",
"keeping",
"sensitiZve",
"or",
"stage",
"-",
... | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/handler.py#L184-L230 |
29,661 | Miserlou/Zappa | zappa/handler.py | LambdaHandler.run_function | def run_function(app_function, event, context):
"""
Given a function and event context,
detect signature and execute, returning any result.
"""
# getargspec does not support python 3 method with type hints
# Related issue: https://github.com/Miserlou/Zappa/issues/1452
if hasattr(inspect, "getfullargspec"): # Python 3
args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(app_function)
else: # Python 2
args, varargs, keywords, defaults = inspect.getargspec(app_function)
num_args = len(args)
if num_args == 0:
result = app_function(event, context) if varargs else app_function()
elif num_args == 1:
result = app_function(event, context) if varargs else app_function(event)
elif num_args == 2:
result = app_function(event, context)
else:
raise RuntimeError("Function signature is invalid. Expected a function that accepts at most "
"2 arguments or varargs.")
return result | python | def run_function(app_function, event, context):
"""
Given a function and event context,
detect signature and execute, returning any result.
"""
# getargspec does not support python 3 method with type hints
# Related issue: https://github.com/Miserlou/Zappa/issues/1452
if hasattr(inspect, "getfullargspec"): # Python 3
args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(app_function)
else: # Python 2
args, varargs, keywords, defaults = inspect.getargspec(app_function)
num_args = len(args)
if num_args == 0:
result = app_function(event, context) if varargs else app_function()
elif num_args == 1:
result = app_function(event, context) if varargs else app_function(event)
elif num_args == 2:
result = app_function(event, context)
else:
raise RuntimeError("Function signature is invalid. Expected a function that accepts at most "
"2 arguments or varargs.")
return result | [
"def",
"run_function",
"(",
"app_function",
",",
"event",
",",
"context",
")",
":",
"# getargspec does not support python 3 method with type hints",
"# Related issue: https://github.com/Miserlou/Zappa/issues/1452",
"if",
"hasattr",
"(",
"inspect",
",",
"\"getfullargspec\"",
")",
... | Given a function and event context,
detect signature and execute, returning any result. | [
"Given",
"a",
"function",
"and",
"event",
"context",
"detect",
"signature",
"and",
"execute",
"returning",
"any",
"result",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/handler.py#L270-L291 |
29,662 | Miserlou/Zappa | zappa/handler.py | LambdaHandler.get_function_for_aws_event | def get_function_for_aws_event(self, record):
"""
Get the associated function to execute for a triggered AWS event
Support S3, SNS, DynamoDB, kinesis and SQS events
"""
if 's3' in record:
if ':' in record['s3']['configurationId']:
return record['s3']['configurationId'].split(':')[-1]
arn = None
if 'Sns' in record:
try:
message = json.loads(record['Sns']['Message'])
if message.get('command'):
return message['command']
except ValueError:
pass
arn = record['Sns'].get('TopicArn')
elif 'dynamodb' in record or 'kinesis' in record:
arn = record.get('eventSourceARN')
elif 'eventSource' in record and record.get('eventSource') == 'aws:sqs':
arn = record.get('eventSourceARN')
elif 's3' in record:
arn = record['s3']['bucket']['arn']
if arn:
return self.settings.AWS_EVENT_MAPPING.get(arn)
return None | python | def get_function_for_aws_event(self, record):
"""
Get the associated function to execute for a triggered AWS event
Support S3, SNS, DynamoDB, kinesis and SQS events
"""
if 's3' in record:
if ':' in record['s3']['configurationId']:
return record['s3']['configurationId'].split(':')[-1]
arn = None
if 'Sns' in record:
try:
message = json.loads(record['Sns']['Message'])
if message.get('command'):
return message['command']
except ValueError:
pass
arn = record['Sns'].get('TopicArn')
elif 'dynamodb' in record or 'kinesis' in record:
arn = record.get('eventSourceARN')
elif 'eventSource' in record and record.get('eventSource') == 'aws:sqs':
arn = record.get('eventSourceARN')
elif 's3' in record:
arn = record['s3']['bucket']['arn']
if arn:
return self.settings.AWS_EVENT_MAPPING.get(arn)
return None | [
"def",
"get_function_for_aws_event",
"(",
"self",
",",
"record",
")",
":",
"if",
"'s3'",
"in",
"record",
":",
"if",
"':'",
"in",
"record",
"[",
"'s3'",
"]",
"[",
"'configurationId'",
"]",
":",
"return",
"record",
"[",
"'s3'",
"]",
"[",
"'configurationId'",... | Get the associated function to execute for a triggered AWS event
Support S3, SNS, DynamoDB, kinesis and SQS events | [
"Get",
"the",
"associated",
"function",
"to",
"execute",
"for",
"a",
"triggered",
"AWS",
"event"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/handler.py#L293-L322 |
29,663 | Miserlou/Zappa | zappa/handler.py | LambdaHandler.get_function_from_bot_intent_trigger | def get_function_from_bot_intent_trigger(self, event):
"""
For the given event build ARN and return the configured function
"""
intent = event.get('currentIntent')
if intent:
intent = intent.get('name')
if intent:
return self.settings.AWS_BOT_EVENT_MAPPING.get(
"{}:{}".format(intent, event.get('invocationSource'))
) | python | def get_function_from_bot_intent_trigger(self, event):
"""
For the given event build ARN and return the configured function
"""
intent = event.get('currentIntent')
if intent:
intent = intent.get('name')
if intent:
return self.settings.AWS_BOT_EVENT_MAPPING.get(
"{}:{}".format(intent, event.get('invocationSource'))
) | [
"def",
"get_function_from_bot_intent_trigger",
"(",
"self",
",",
"event",
")",
":",
"intent",
"=",
"event",
".",
"get",
"(",
"'currentIntent'",
")",
"if",
"intent",
":",
"intent",
"=",
"intent",
".",
"get",
"(",
"'name'",
")",
"if",
"intent",
":",
"return"... | For the given event build ARN and return the configured function | [
"For",
"the",
"given",
"event",
"build",
"ARN",
"and",
"return",
"the",
"configured",
"function"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/handler.py#L324-L334 |
29,664 | Miserlou/Zappa | zappa/handler.py | LambdaHandler.get_function_for_cognito_trigger | def get_function_for_cognito_trigger(self, trigger):
"""
Get the associated function to execute for a cognito trigger
"""
print("get_function_for_cognito_trigger", self.settings.COGNITO_TRIGGER_MAPPING, trigger, self.settings.COGNITO_TRIGGER_MAPPING.get(trigger))
return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger) | python | def get_function_for_cognito_trigger(self, trigger):
"""
Get the associated function to execute for a cognito trigger
"""
print("get_function_for_cognito_trigger", self.settings.COGNITO_TRIGGER_MAPPING, trigger, self.settings.COGNITO_TRIGGER_MAPPING.get(trigger))
return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger) | [
"def",
"get_function_for_cognito_trigger",
"(",
"self",
",",
"trigger",
")",
":",
"print",
"(",
"\"get_function_for_cognito_trigger\"",
",",
"self",
".",
"settings",
".",
"COGNITO_TRIGGER_MAPPING",
",",
"trigger",
",",
"self",
".",
"settings",
".",
"COGNITO_TRIGGER_MA... | Get the associated function to execute for a cognito trigger | [
"Get",
"the",
"associated",
"function",
"to",
"execute",
"for",
"a",
"cognito",
"trigger"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/handler.py#L336-L341 |
29,665 | Miserlou/Zappa | example/authmodule.py | lambda_handler | def lambda_handler(event, context):
print("Client token: " + event['authorizationToken'])
print("Method ARN: " + event['methodArn'])
"""validate the incoming token"""
"""and produce the principal user identifier associated with the token"""
"""this could be accomplished in a number of ways:"""
"""1. Call out to OAuth provider"""
"""2. Decode a JWT token inline"""
"""3. Lookup in a self-managed DB"""
principalId = "user|a1b2c3d4"
"""you can send a 401 Unauthorized response to the client by failing like so:"""
"""raise Exception('Unauthorized')"""
"""if the token is valid, a policy must be generated which will allow or deny access to the client"""
"""if access is denied, the client will receive a 403 Access Denied response"""
"""if access is allowed, API Gateway will proceed with the backend integration configured on the method that was called"""
"""this function must generate a policy that is associated with the recognized principal user identifier."""
"""depending on your use case, you might store policies in a DB, or generate them on the fly"""
"""keep in mind, the policy is cached for 5 minutes by default (TTL is configurable in the authorizer)"""
"""and will apply to subsequent calls to any method/resource in the RestApi"""
"""made with the same token"""
"""the example policy below denies access to all resources in the RestApi"""
tmp = event['methodArn'].split(':')
apiGatewayArnTmp = tmp[5].split('/')
awsAccountId = tmp[4]
policy = AuthPolicy(principalId, awsAccountId)
policy.restApiId = apiGatewayArnTmp[0]
policy.region = tmp[3]
policy.stage = apiGatewayArnTmp[1]
# Blueprint denies all methods by default
# policy.denyAllMethods()
# Example allows all methods
policy.allowAllMethods()
"""policy.allowMethod(HttpVerb.GET, "/pets/*")"""
"""finally, build the policy and exit the function using return"""
return policy.build() | python | def lambda_handler(event, context):
print("Client token: " + event['authorizationToken'])
print("Method ARN: " + event['methodArn'])
"""validate the incoming token"""
"""and produce the principal user identifier associated with the token"""
"""this could be accomplished in a number of ways:"""
"""1. Call out to OAuth provider"""
"""2. Decode a JWT token inline"""
"""3. Lookup in a self-managed DB"""
principalId = "user|a1b2c3d4"
"""you can send a 401 Unauthorized response to the client by failing like so:"""
"""raise Exception('Unauthorized')"""
"""if the token is valid, a policy must be generated which will allow or deny access to the client"""
"""if access is denied, the client will receive a 403 Access Denied response"""
"""if access is allowed, API Gateway will proceed with the backend integration configured on the method that was called"""
"""this function must generate a policy that is associated with the recognized principal user identifier."""
"""depending on your use case, you might store policies in a DB, or generate them on the fly"""
"""keep in mind, the policy is cached for 5 minutes by default (TTL is configurable in the authorizer)"""
"""and will apply to subsequent calls to any method/resource in the RestApi"""
"""made with the same token"""
"""the example policy below denies access to all resources in the RestApi"""
tmp = event['methodArn'].split(':')
apiGatewayArnTmp = tmp[5].split('/')
awsAccountId = tmp[4]
policy = AuthPolicy(principalId, awsAccountId)
policy.restApiId = apiGatewayArnTmp[0]
policy.region = tmp[3]
policy.stage = apiGatewayArnTmp[1]
# Blueprint denies all methods by default
# policy.denyAllMethods()
# Example allows all methods
policy.allowAllMethods()
"""policy.allowMethod(HttpVerb.GET, "/pets/*")"""
"""finally, build the policy and exit the function using return"""
return policy.build() | [
"def",
"lambda_handler",
"(",
"event",
",",
"context",
")",
":",
"print",
"(",
"\"Client token: \"",
"+",
"event",
"[",
"'authorizationToken'",
"]",
")",
"print",
"(",
"\"Method ARN: \"",
"+",
"event",
"[",
"'methodArn'",
"]",
")",
"\"\"\"and produce the principal... | validate the incoming token | [
"validate",
"the",
"incoming",
"token"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/example/authmodule.py#L15-L61 |
29,666 | Miserlou/Zappa | example/authmodule.py | AuthPolicy._addMethod | def _addMethod(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class")
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.pathRegex)
if resource[:1] == "/":
resource = resource[1:]
resourceArn = ("arn:aws:execute-api:" +
self.region + ":" +
self.awsAccountId + ":" +
self.restApiId + "/" +
self.stage + "/" +
verb + "/" +
resource)
if effect.lower() == "allow":
self.allowMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
elif effect.lower() == "deny":
self.denyMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
}) | python | def _addMethod(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class")
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.pathRegex)
if resource[:1] == "/":
resource = resource[1:]
resourceArn = ("arn:aws:execute-api:" +
self.region + ":" +
self.awsAccountId + ":" +
self.restApiId + "/" +
self.stage + "/" +
verb + "/" +
resource)
if effect.lower() == "allow":
self.allowMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
elif effect.lower() == "deny":
self.denyMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
}) | [
"def",
"_addMethod",
"(",
"self",
",",
"effect",
",",
"verb",
",",
"resource",
",",
"conditions",
")",
":",
"if",
"verb",
"!=",
"\"*\"",
"and",
"not",
"hasattr",
"(",
"HttpVerb",
",",
"verb",
")",
":",
"raise",
"NameError",
"(",
"\"Invalid HTTP verb \"",
... | Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null. | [
"Adds",
"a",
"method",
"to",
"the",
"internal",
"lists",
"of",
"allowed",
"or",
"denied",
"methods",
".",
"Each",
"object",
"in",
"the",
"internal",
"list",
"contains",
"a",
"resource",
"ARN",
"and",
"a",
"condition",
"statement",
".",
"The",
"condition",
... | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/example/authmodule.py#L104-L134 |
29,667 | Miserlou/Zappa | zappa/core.py | Zappa.boto_client | def boto_client(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto clients"""
return self.boto_session.client(service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)) | python | def boto_client(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto clients"""
return self.boto_session.client(service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)) | [
"def",
"boto_client",
"(",
"self",
",",
"service",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"boto_session",
".",
"client",
"(",
"service",
",",
"*",
"args",
",",
"*",
"*",
"self",
".",
"configure_boto_session_method_kwa... | A wrapper to apply configuration options to boto clients | [
"A",
"wrapper",
"to",
"apply",
"configuration",
"options",
"to",
"boto",
"clients"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L334-L336 |
29,668 | Miserlou/Zappa | zappa/core.py | Zappa.boto_resource | def boto_resource(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto resources"""
return self.boto_session.resource(service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)) | python | def boto_resource(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto resources"""
return self.boto_session.resource(service, *args, **self.configure_boto_session_method_kwargs(service, kwargs)) | [
"def",
"boto_resource",
"(",
"self",
",",
"service",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"boto_session",
".",
"resource",
"(",
"service",
",",
"*",
"args",
",",
"*",
"*",
"self",
".",
"configure_boto_session_method... | A wrapper to apply configuration options to boto resources | [
"A",
"wrapper",
"to",
"apply",
"configuration",
"options",
"to",
"boto",
"resources"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L338-L340 |
29,669 | Miserlou/Zappa | zappa/core.py | Zappa.cache_param | def cache_param(self, value):
'''Returns a troposphere Ref to a value cached as a parameter.'''
if value not in self.cf_parameters:
keyname = chr(ord('A') + len(self.cf_parameters))
param = self.cf_template.add_parameter(troposphere.Parameter(
keyname, Type="String", Default=value, tags=self.tags
))
self.cf_parameters[value] = param
return troposphere.Ref(self.cf_parameters[value]) | python | def cache_param(self, value):
'''Returns a troposphere Ref to a value cached as a parameter.'''
if value not in self.cf_parameters:
keyname = chr(ord('A') + len(self.cf_parameters))
param = self.cf_template.add_parameter(troposphere.Parameter(
keyname, Type="String", Default=value, tags=self.tags
))
self.cf_parameters[value] = param
return troposphere.Ref(self.cf_parameters[value]) | [
"def",
"cache_param",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"not",
"in",
"self",
".",
"cf_parameters",
":",
"keyname",
"=",
"chr",
"(",
"ord",
"(",
"'A'",
")",
"+",
"len",
"(",
"self",
".",
"cf_parameters",
")",
")",
"param",
"=",
"se... | Returns a troposphere Ref to a value cached as a parameter. | [
"Returns",
"a",
"troposphere",
"Ref",
"to",
"a",
"value",
"cached",
"as",
"a",
"parameter",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L342-L353 |
29,670 | Miserlou/Zappa | zappa/core.py | Zappa.get_deps_list | def get_deps_list(self, pkg_name, installed_distros=None):
"""
For a given package, returns a list of required packages. Recursive.
"""
# https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
# instead of `pip` is the recommended approach. The usage is nearly
# identical.
import pkg_resources
deps = []
if not installed_distros:
installed_distros = pkg_resources.WorkingSet()
for package in installed_distros:
if package.project_name.lower() == pkg_name.lower():
deps = [(package.project_name, package.version)]
for req in package.requires():
deps += self.get_deps_list(pkg_name=req.project_name, installed_distros=installed_distros)
return list(set(deps)) | python | def get_deps_list(self, pkg_name, installed_distros=None):
"""
For a given package, returns a list of required packages. Recursive.
"""
# https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
# instead of `pip` is the recommended approach. The usage is nearly
# identical.
import pkg_resources
deps = []
if not installed_distros:
installed_distros = pkg_resources.WorkingSet()
for package in installed_distros:
if package.project_name.lower() == pkg_name.lower():
deps = [(package.project_name, package.version)]
for req in package.requires():
deps += self.get_deps_list(pkg_name=req.project_name, installed_distros=installed_distros)
return list(set(deps)) | [
"def",
"get_deps_list",
"(",
"self",
",",
"pkg_name",
",",
"installed_distros",
"=",
"None",
")",
":",
"# https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`",
"# instead of `pip` is the recommended approach. The usage is nearly",
"# identical.",
"import",
"pkg_res... | For a given package, returns a list of required packages. Recursive. | [
"For",
"a",
"given",
"package",
"returns",
"a",
"list",
"of",
"required",
"packages",
".",
"Recursive",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L373-L389 |
29,671 | Miserlou/Zappa | zappa/core.py | Zappa.create_handler_venv | def create_handler_venv(self):
"""
def create_handler_venv(self):
    """
    Take the installed Zappa and bring it into a fresh virtualenv-like folder.

    Zappa itself is copied from the active virtual environment, and its
    dependencies are then downloaded into the new folder with pip.
    Returns the path of the new virtualenv folder.
    """
    import subprocess

    # We need the currently active venv to pull Zappa from.
    source_venv = self.get_current_venv()

    # Destination folder for the handler's packages.
    ve_path = os.path.join(os.getcwd(), 'handler_venv')

    if os.sys.platform == 'win32':
        source_site_packages = os.path.join(source_venv, 'Lib', 'site-packages')
        target_site_packages = os.path.join(ve_path, 'Lib', 'site-packages')
    else:
        source_site_packages = os.path.join(
            source_venv, 'lib', get_venv_from_python_version(), 'site-packages')
        target_site_packages = os.path.join(
            ve_path, 'lib', get_venv_from_python_version(), 'site-packages')

    if not os.path.isdir(target_site_packages):
        os.makedirs(target_site_packages)

    # Copy zappa* packages into the new virtualenv.
    for entry in os.listdir(source_site_packages):
        if entry.lower()[:5] == 'zappa':
            copytree(os.path.join(source_site_packages, entry),
                     os.path.join(target_site_packages, entry))

    # Use pip to download zappa's dependencies. Copying them from the current
    # venv causes issues with packages (e.g. PyYAML) that install under a
    # different name than their project name.
    zappa_deps = self.get_deps_list('zappa')
    pkg_list = ['{0!s}=={1!s}'.format(dep, version) for dep, version in zappa_deps]
    # setuptools has to be added manually.
    pkg_list.append('setuptools')

    command = ["pip", "install", "--quiet", "--target", target_site_packages] + pkg_list

    # This is the recommended method for installing packages if you don't
    # want to depend on `setuptools`:
    # https://github.com/pypa/pip/issues/5240#issuecomment-381662679
    pip_process = subprocess.Popen(command, stdout=subprocess.PIPE)
    # communicate() avoids deadlocks on a filled stdout pipe.
    pip_process.communicate()
    if pip_process.returncode:
        raise EnvironmentError("Pypi lookup failed")

    return ve_path
def get_current_venv():
    """
    Return the path of the currently active virtualenv.

    Checks, in order: the VIRTUAL_ENV environment variable, then a pyenv
    `.python-version` file in the working directory. Returns None when no
    virtual environment can be detected.
    """
    if 'VIRTUAL_ENV' in os.environ:
        return os.environ['VIRTUAL_ENV']

    if os.path.exists('.python-version'):  # pragma: no cover
        try:
            subprocess.check_output(['pyenv', 'help'], stderr=subprocess.STDOUT)
        except OSError:
            print("This directory seems to have pyenv's local venv, "
                  "but pyenv executable was not found.")
        with open('.python-version', 'r') as f:
            # Read only the first line of .python-version.
            # Related: https://github.com/Miserlou/Zappa/issues/921
            env_name = f.readline().strip()
        bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')
        return bin_path[:bin_path.rfind(env_name)] + env_name

    return None  # pragma: no cover
def extract_lambda_package(self, package_name, path):
    """
    Extract the precompiled lambda package for `package_name` into `path`.

    Assumes the package exists in lambda-packages for `self.runtime`.
    Raises KeyError if it does not.
    """
    lambda_package = lambda_packages[package_name][self.runtime]

    # Trash the locally installed version first to help with package space saving.
    shutil.rmtree(os.path.join(path, package_name), ignore_errors=True)

    # Fix: close the tarfile when done (the original leaked the file handle).
    with tarfile.open(lambda_package['path'], mode="r:gz") as tar:
        for member in tar.getmembers():
            tar.extract(member, path)
def get_installed_packages(site_packages, site_packages_64):
    """
    Return a dict of installed packages that Zappa cares about.

    Maps lower-cased project names to version strings, for every installed
    distribution whose name appears in either site-packages directory or
    whose install location is one of those directories.
    """
    import pkg_resources

    names_to_keep = []
    for directory in (site_packages, site_packages_64):
        if os.path.isdir(directory):
            names_to_keep.extend(os.listdir(directory))
    names_to_keep = {name.lower() for name in names_to_keep}

    site_dirs = [site_packages.lower(), site_packages_64.lower()]

    installed = {}
    for dist in pkg_resources.WorkingSet():
        project = dist.project_name.lower()
        if project in names_to_keep or dist.location.lower() in site_dirs:
            installed[project] = dist.version
    return installed
def have_correct_lambda_package_version(self, package_name, package_version):
    """
    Return True when the precompiled lambda-packages binary for
    `package_name` should be copied over for this runtime.

    `package_name` should be the lower-cased version of the package name.
    """
    details = lambda_packages.get(package_name, {}).get(self.runtime)
    if details is None:
        return False
    # Binaries are compiled against a specific package version.
    # Related: https://github.com/Miserlou/Zappa/issues/800
    return package_version == details['version']
def get_cached_manylinux_wheel(self, package_name, package_version, disable_progress=False):
    """
    Return the path of the locally cached manylinux wheel for a package.

    Downloads the wheel first when no valid cached copy exists. Returns
    None when no manylinux wheel is available or the download is not a
    valid zip file.
    """
    cached_wheels_dir = os.path.join(tempfile.gettempdir(), 'cached_wheels')
    if not os.path.isdir(cached_wheels_dir):
        os.makedirs(cached_wheels_dir)

    wheel_file = '{0!s}-{1!s}-{2!s}'.format(
        package_name, package_version, self.manylinux_wheel_file_suffix)
    wheel_path = os.path.join(cached_wheels_dir, wheel_file)

    if os.path.exists(wheel_path) and zipfile.is_zipfile(wheel_path):
        print(" - {}=={}: Using locally cached manylinux wheel".format(package_name, package_version))
        return wheel_path

    # The file is not cached (or is corrupt): download it.
    wheel_url = self.get_manylinux_wheel_url(package_name, package_version)
    if not wheel_url:
        return None

    print(" - {}=={}: Downloading".format(package_name, package_version))
    with open(wheel_path, 'wb') as f:
        self.download_url_with_progress(wheel_url, f, disable_progress)

    if not zipfile.is_zipfile(wheel_path):
        return None
    return wheel_path
def get_manylinux_wheel_url(self, package_name, package_version):
    """
    For a given package name/version, return a link to the manylinux wheel
    download URL, else return None.

    Related: https://github.com/Miserlou/Zappa/issues/398
    Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae

    This function downloads metadata JSON of `package_name` from PyPI and
    examines if the package has a manylinux wheel. The JSON file is cached
    on disk so that we don't have to poll PyPI every time.
    """
    cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), 'cached_pypi_info')
    if not os.path.isdir(cached_pypi_info_dir):
        os.makedirs(cached_pypi_info_dir)

    # Even though the metadata is for the package, we save it in a filename
    # that includes the package's version. This helps in invalidating the
    # cached file if the user moves to a different version of the package.
    # Related: https://github.com/Miserlou/Zappa/issues/899
    json_file = '{0!s}-{1!s}.json'.format(package_name, package_version)
    json_file_path = os.path.join(cached_pypi_info_dir, json_file)

    if os.path.exists(json_file_path):
        with open(json_file_path, 'rb') as metafile:
            data = json.load(metafile)
    else:
        url = 'https://pypi.python.org/pypi/{}/json'.format(package_name)
        try:
            res = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 1.5)))
            data = res.json()
        except Exception:  # pragma: no cover
            return None
        with open(json_file_path, 'wb') as metafile:
            jsondata = json.dumps(data)
            metafile.write(bytes(jsondata, "utf-8"))

    # Fix: a not-found response from PyPI has no 'releases' key, which
    # previously raised KeyError (e.g. for a mistyped package name).
    releases = data.get('releases', {})
    if package_version not in releases:
        return None

    for f in releases[package_version]:
        if f['filename'].endswith(self.manylinux_wheel_file_suffix):
            return f['url']
    return None
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
    """
    Copy src_file_name to dst_file_name within a bucket.

    Returns True on success, False when the bucket does not exist or the
    copy fails.
    """
    try:
        self.s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:  # pragma: no cover
        # A 404 from head_bucket means the bucket does not exist.
        if int(e.response['Error']['Code']) == 404:
            return False

    copy_src = {
        "Bucket": bucket_name,
        "Key": src_file_name
    }
    try:
        self.s3_client.copy(CopySource=copy_src, Bucket=bucket_name, Key=dst_file_name)
        return True
    except botocore.exceptions.ClientError:  # pragma: no cover
        return False
def remove_from_s3(self, file_name, bucket_name):
    """
    Given a file name and a bucket, remove it from S3.

    There's no reason to keep the file hosted on S3 once it's been made
    into a Lambda function, so we can delete it from S3.
    Returns True on success, False on failure.
    """
    try:
        self.s3_client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:  # pragma: no cover
        # A 404 from head_bucket means the bucket does not exist.
        if int(e.response['Error']['Code']) == 404:
            return False

    try:
        self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
        return True
    except (botocore.exceptions.ParamValidationError,
            botocore.exceptions.ClientError):  # pragma: no cover
        return False
def update_lambda_configuration(self,
                                lambda_arn,
                                function_name,
                                handler,
                                description='Zappa Deployment',
                                timeout=30,
                                memory_size=512,
                                publish=True,
                                vpc_config=None,
                                runtime='python2.7',
                                aws_environment_variables=None,
                                aws_kms_key_arn=None
                                ):
    """
    Given an existing function, update its configuration variables.

    Environment variables already set remotely but absent from the local
    settings are preserved. Returns the function's resource ARN.

    NOTE(review): `lambda_arn` and `publish` are accepted for interface
    compatibility but are not used by this method.
    """
    print("Updating Lambda function configuration..")

    if not vpc_config:
        vpc_config = {}
    if not aws_kms_key_arn:
        aws_kms_key_arn = ''
    if not aws_environment_variables:
        aws_environment_variables = {}

    if not self.credentials_arn:
        self.get_credentials_arn()

    # Merge in any remote-only env vars so they don't get trashed.
    # https://github.com/Miserlou/Zappa/issues/987
    # Related: https://github.com/Miserlou/Zappa/issues/765
    remote_config = self.lambda_client.get_function_configuration(FunctionName=function_name)
    if "Environment" in remote_config:
        remote_env = remote_config["Environment"].get("Variables", {})
        for key, value in remote_env.items():
            aws_environment_variables.setdefault(key, value)

    response = self.lambda_client.update_function_configuration(
        FunctionName=function_name,
        Runtime=runtime,
        Role=self.credentials_arn,
        Handler=handler,
        Description=description,
        Timeout=timeout,
        MemorySize=memory_size,
        VpcConfig=vpc_config,
        Environment={'Variables': aws_environment_variables},
        KMSKeyArn=aws_kms_key_arn,
        TracingConfig={
            'Mode': 'Active' if self.xray_tracing else 'PassThrough'
        }
    )

    resource_arn = response['FunctionArn']
    if self.tags:
        self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
    return resource_arn
def invoke_lambda_function(self,
                           function_name,
                           payload,
                           invocation_type='Event',
                           log_type='Tail',
                           client_context=None,
                           qualifier=None
                           ):
    """
    Directly invoke a named Lambda function with a payload.

    Returns the boto3 invoke response. `client_context` and `qualifier`
    are forwarded to the API when supplied (previously these parameters
    were accepted but silently ignored).
    """
    params = dict(
        FunctionName=function_name,
        InvocationType=invocation_type,
        LogType=log_type,
        Payload=payload,
    )
    # Only include the optional arguments when the caller provided them,
    # so default calls behave exactly as before.
    if client_context is not None:
        params['ClientContext'] = client_context
    if qualifier is not None:
        params['Qualifier'] = qualifier

    return self.lambda_client.invoke(**params)
def rollback_lambda_function_version(self, function_name, versions_back=1, publish=True):
    """
    Roll back the Lambda function code `versions_back` number of revisions.

    Returns the new Function ARN on success, False on failure.
    """
    response = self.lambda_client.list_versions_by_function(FunctionName=function_name)

    # The version list includes $LATEST, so take it into account.
    if len(response['Versions']) < versions_back + 1:
        print("We do not have {} revisions. Aborting".format(str(versions_back)))
        return False

    revisions = sorted(
        (int(revision['Version'])
         for revision in response['Versions']
         if revision['Version'] != '$LATEST'),
        reverse=True,
    )

    response = self.lambda_client.get_function(
        FunctionName='function:{}:{}'.format(function_name, revisions[versions_back]))
    response = requests.get(response['Code']['Location'])

    if response.status_code != 200:
        print("Failed to get version {} of {} code".format(versions_back, function_name))
        return False

    response = self.lambda_client.update_function_code(
        FunctionName=function_name, ZipFile=response.content, Publish=publish)  # pragma: no cover

    return response['FunctionArn']
def get_lambda_function(self, function_name):
    """
    Return the Lambda function ARN, given a name.

    This requires the "lambda:GetFunction" role.
    """
    configuration = self.lambda_client.get_function(FunctionName=function_name)['Configuration']
    return configuration['FunctionArn']
def get_lambda_function_versions(self, function_name):
    """
    Return the versions available for a Lambda function, given a name.

    Returns an empty list when the lookup fails for any reason.
    """
    try:
        response = self.lambda_client.list_versions_by_function(
            FunctionName=function_name
        )
    except Exception:
        return []
    return response.get('Versions', [])
29,685 | Miserlou/Zappa | zappa/core.py | Zappa.create_api_gateway_routes | def create_api_gateway_routes( self,
lambda_arn,
api_name=None,
api_key_required=False,
authorization_type='NONE',
authorizer=None,
cors_options=None,
description=None,
endpoint_configuration=None
):
"""
Create the API Gateway for this Zappa deployment.
Returns the new RestAPI CF resource.
"""
restapi = troposphere.apigateway.RestApi('Api')
restapi.Name = api_name or lambda_arn.split(':')[-1]
if not description:
description = 'Created automatically by Zappa.'
restapi.Description = description
endpoint_configuration = [] if endpoint_configuration is None else endpoint_configuration
if self.boto_session.region_name == "us-gov-west-1":
endpoint_configuration.append("REGIONAL")
if endpoint_configuration:
endpoint = troposphere.apigateway.EndpointConfiguration()
endpoint.Types = list(set(endpoint_configuration))
restapi.EndpointConfiguration = endpoint
if self.apigateway_policy:
restapi.Policy = json.loads(self.apigateway_policy)
self.cf_template.add_resource(restapi)
root_id = troposphere.GetAtt(restapi, 'RootResourceId')
invocation_prefix = "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov"
invocations_uri = 'arn:' + invocation_prefix + ':apigateway:' + self.boto_session.region_name + ':lambda:path/2015-03-31/functions/' + lambda_arn + '/invocations'
##
# The Resources
##
authorizer_resource = None
if authorizer:
authorizer_lambda_arn = authorizer.get('arn', lambda_arn)
lambda_uri = 'arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations'.format(
invocation_prefix=invocation_prefix,
region_name=self.boto_session.region_name,
lambda_arn=authorizer_lambda_arn
)
authorizer_resource = self.create_authorizer(
restapi, lambda_uri, authorizer
)
self.create_and_setup_methods( restapi,
root_id,
api_key_required,
invocations_uri,
authorization_type,
authorizer_resource,
0
)
if cors_options:
self.create_and_setup_cors( restapi,
root_id,
invocations_uri,
0,
cors_options
)
resource = troposphere.apigateway.Resource('ResourceAnyPathSlashed')
self.cf_api_resources.append(resource.title)
resource.RestApiId = troposphere.Ref(restapi)
resource.ParentId = root_id
resource.PathPart = "{proxy+}"
self.cf_template.add_resource(resource)
self.create_and_setup_methods( restapi,
resource,
api_key_required,
invocations_uri,
authorization_type,
authorizer_resource,
1
) # pragma: no cover
if cors_options:
self.create_and_setup_cors( restapi,
resource,
invocations_uri,
1,
cors_options
) # pragma: no cover
return restapi | python | def create_api_gateway_routes( self,
lambda_arn,
api_name=None,
api_key_required=False,
authorization_type='NONE',
authorizer=None,
cors_options=None,
description=None,
endpoint_configuration=None
):
"""
Create the API Gateway for this Zappa deployment.
Returns the new RestAPI CF resource.
"""
restapi = troposphere.apigateway.RestApi('Api')
restapi.Name = api_name or lambda_arn.split(':')[-1]
if not description:
description = 'Created automatically by Zappa.'
restapi.Description = description
endpoint_configuration = [] if endpoint_configuration is None else endpoint_configuration
if self.boto_session.region_name == "us-gov-west-1":
endpoint_configuration.append("REGIONAL")
if endpoint_configuration:
endpoint = troposphere.apigateway.EndpointConfiguration()
endpoint.Types = list(set(endpoint_configuration))
restapi.EndpointConfiguration = endpoint
if self.apigateway_policy:
restapi.Policy = json.loads(self.apigateway_policy)
self.cf_template.add_resource(restapi)
root_id = troposphere.GetAtt(restapi, 'RootResourceId')
invocation_prefix = "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov"
invocations_uri = 'arn:' + invocation_prefix + ':apigateway:' + self.boto_session.region_name + ':lambda:path/2015-03-31/functions/' + lambda_arn + '/invocations'
##
# The Resources
##
authorizer_resource = None
if authorizer:
authorizer_lambda_arn = authorizer.get('arn', lambda_arn)
lambda_uri = 'arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations'.format(
invocation_prefix=invocation_prefix,
region_name=self.boto_session.region_name,
lambda_arn=authorizer_lambda_arn
)
authorizer_resource = self.create_authorizer(
restapi, lambda_uri, authorizer
)
self.create_and_setup_methods( restapi,
root_id,
api_key_required,
invocations_uri,
authorization_type,
authorizer_resource,
0
)
if cors_options:
self.create_and_setup_cors( restapi,
root_id,
invocations_uri,
0,
cors_options
)
resource = troposphere.apigateway.Resource('ResourceAnyPathSlashed')
self.cf_api_resources.append(resource.title)
resource.RestApiId = troposphere.Ref(restapi)
resource.ParentId = root_id
resource.PathPart = "{proxy+}"
self.cf_template.add_resource(resource)
self.create_and_setup_methods( restapi,
resource,
api_key_required,
invocations_uri,
authorization_type,
authorizer_resource,
1
) # pragma: no cover
if cors_options:
self.create_and_setup_cors( restapi,
resource,
invocations_uri,
1,
cors_options
) # pragma: no cover
return restapi | [
"def",
"create_api_gateway_routes",
"(",
"self",
",",
"lambda_arn",
",",
"api_name",
"=",
"None",
",",
"api_key_required",
"=",
"False",
",",
"authorization_type",
"=",
"'NONE'",
",",
"authorizer",
"=",
"None",
",",
"cors_options",
"=",
"None",
",",
"description... | Create the API Gateway for this Zappa deployment.
Returns the new RestAPI CF resource. | [
"Create",
"the",
"API",
"Gateway",
"for",
"this",
"Zappa",
"deployment",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1525-L1616 |
29,686 | Miserlou/Zappa | zappa/core.py | Zappa.create_authorizer | def create_authorizer(self, restapi, uri, authorizer):
"""
Create Authorizer for API gateway
"""
authorizer_type = authorizer.get("type", "TOKEN").upper()
identity_validation_expression = authorizer.get('validation_expression', None)
authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
authorizer_resource.RestApiId = troposphere.Ref(restapi)
authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
authorizer_resource.Type = authorizer_type
authorizer_resource.AuthorizerUri = uri
authorizer_resource.IdentitySource = "method.request.header.%s" % authorizer.get('token_header', 'Authorization')
if identity_validation_expression:
authorizer_resource.IdentityValidationExpression = identity_validation_expression
if authorizer_type == 'TOKEN':
if not self.credentials_arn:
self.get_credentials_arn()
authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get('result_ttl', 300)
authorizer_resource.AuthorizerCredentials = self.credentials_arn
if authorizer_type == 'COGNITO_USER_POOLS':
authorizer_resource.ProviderARNs = authorizer.get('provider_arns')
self.cf_api_resources.append(authorizer_resource.title)
self.cf_template.add_resource(authorizer_resource)
return authorizer_resource | python | def create_authorizer(self, restapi, uri, authorizer):
"""
Create Authorizer for API gateway
"""
authorizer_type = authorizer.get("type", "TOKEN").upper()
identity_validation_expression = authorizer.get('validation_expression', None)
authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
authorizer_resource.RestApiId = troposphere.Ref(restapi)
authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
authorizer_resource.Type = authorizer_type
authorizer_resource.AuthorizerUri = uri
authorizer_resource.IdentitySource = "method.request.header.%s" % authorizer.get('token_header', 'Authorization')
if identity_validation_expression:
authorizer_resource.IdentityValidationExpression = identity_validation_expression
if authorizer_type == 'TOKEN':
if not self.credentials_arn:
self.get_credentials_arn()
authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get('result_ttl', 300)
authorizer_resource.AuthorizerCredentials = self.credentials_arn
if authorizer_type == 'COGNITO_USER_POOLS':
authorizer_resource.ProviderARNs = authorizer.get('provider_arns')
self.cf_api_resources.append(authorizer_resource.title)
self.cf_template.add_resource(authorizer_resource)
return authorizer_resource | [
"def",
"create_authorizer",
"(",
"self",
",",
"restapi",
",",
"uri",
",",
"authorizer",
")",
":",
"authorizer_type",
"=",
"authorizer",
".",
"get",
"(",
"\"type\"",
",",
"\"TOKEN\"",
")",
".",
"upper",
"(",
")",
"identity_validation_expression",
"=",
"authoriz... | Create Authorizer for API gateway | [
"Create",
"Authorizer",
"for",
"API",
"gateway"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1618-L1645 |
29,687 | Miserlou/Zappa | zappa/core.py | Zappa.deploy_api_gateway | def deploy_api_gateway( self,
api_id,
stage_name,
stage_description="",
description="",
cache_cluster_enabled=False,
cache_cluster_size='0.5',
variables=None,
cloudwatch_log_level='OFF',
cloudwatch_data_trace=False,
cloudwatch_metrics_enabled=False,
cache_cluster_ttl=300,
cache_cluster_encrypted=False
):
"""
Deploy the API Gateway!
Return the deployed API URL.
"""
print("Deploying API Gateway..")
self.apigateway_client.create_deployment(
restApiId=api_id,
stageName=stage_name,
stageDescription=stage_description,
description=description,
cacheClusterEnabled=cache_cluster_enabled,
cacheClusterSize=cache_cluster_size,
variables=variables or {}
)
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = 'OFF'
self.apigateway_client.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=[
self.get_patch_op('logging/loglevel', cloudwatch_log_level),
self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
self.get_patch_op('caching/ttlInSeconds', str(cache_cluster_ttl)),
self.get_patch_op('caching/dataEncrypted', cache_cluster_encrypted)
]
)
return "https://{}.execute-api.{}.amazonaws.com/{}".format(api_id, self.boto_session.region_name, stage_name) | python | def deploy_api_gateway( self,
api_id,
stage_name,
stage_description="",
description="",
cache_cluster_enabled=False,
cache_cluster_size='0.5',
variables=None,
cloudwatch_log_level='OFF',
cloudwatch_data_trace=False,
cloudwatch_metrics_enabled=False,
cache_cluster_ttl=300,
cache_cluster_encrypted=False
):
"""
Deploy the API Gateway!
Return the deployed API URL.
"""
print("Deploying API Gateway..")
self.apigateway_client.create_deployment(
restApiId=api_id,
stageName=stage_name,
stageDescription=stage_description,
description=description,
cacheClusterEnabled=cache_cluster_enabled,
cacheClusterSize=cache_cluster_size,
variables=variables or {}
)
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = 'OFF'
self.apigateway_client.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=[
self.get_patch_op('logging/loglevel', cloudwatch_log_level),
self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
self.get_patch_op('caching/ttlInSeconds', str(cache_cluster_ttl)),
self.get_patch_op('caching/dataEncrypted', cache_cluster_encrypted)
]
)
return "https://{}.execute-api.{}.amazonaws.com/{}".format(api_id, self.boto_session.region_name, stage_name) | [
"def",
"deploy_api_gateway",
"(",
"self",
",",
"api_id",
",",
"stage_name",
",",
"stage_description",
"=",
"\"\"",
",",
"description",
"=",
"\"\"",
",",
"cache_cluster_enabled",
"=",
"False",
",",
"cache_cluster_size",
"=",
"'0.5'",
",",
"variables",
"=",
"None"... | Deploy the API Gateway!
Return the deployed API URL. | [
"Deploy",
"the",
"API",
"Gateway!"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1751-L1797 |
29,688 | Miserlou/Zappa | zappa/core.py | Zappa.remove_binary_support | def remove_binary_support(self, api_id, cors=False):
"""
Remove binary support
"""
response = self.apigateway_client.get_rest_api(
restApiId=api_id
)
if "binaryMediaTypes" in response and "*/*" in response["binaryMediaTypes"]:
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'remove',
'path': '/binaryMediaTypes/*~1*'
}
]
)
if cors:
# go through each resource and change the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item['id'] for item in response['items']
if 'OPTIONS' in item.get('resourceMethods', {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
patchOperations=[
{
"op": "replace",
"path": "/contentHandling",
"value": ""
}
]
) | python | def remove_binary_support(self, api_id, cors=False):
"""
Remove binary support
"""
response = self.apigateway_client.get_rest_api(
restApiId=api_id
)
if "binaryMediaTypes" in response and "*/*" in response["binaryMediaTypes"]:
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'remove',
'path': '/binaryMediaTypes/*~1*'
}
]
)
if cors:
# go through each resource and change the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item['id'] for item in response['items']
if 'OPTIONS' in item.get('resourceMethods', {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
patchOperations=[
{
"op": "replace",
"path": "/contentHandling",
"value": ""
}
]
) | [
"def",
"remove_binary_support",
"(",
"self",
",",
"api_id",
",",
"cors",
"=",
"False",
")",
":",
"response",
"=",
"self",
".",
"apigateway_client",
".",
"get_rest_api",
"(",
"restApiId",
"=",
"api_id",
")",
"if",
"\"binaryMediaTypes\"",
"in",
"response",
"and"... | Remove binary support | [
"Remove",
"binary",
"support"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1840-L1877 |
29,689 | Miserlou/Zappa | zappa/core.py | Zappa.add_api_compression | def add_api_compression(self, api_id, min_compression_size):
"""
Add Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'replace',
'path': '/minimumCompressionSize',
'value': str(min_compression_size)
}
]
) | python | def add_api_compression(self, api_id, min_compression_size):
"""
Add Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'replace',
'path': '/minimumCompressionSize',
'value': str(min_compression_size)
}
]
) | [
"def",
"add_api_compression",
"(",
"self",
",",
"api_id",
",",
"min_compression_size",
")",
":",
"self",
".",
"apigateway_client",
".",
"update_rest_api",
"(",
"restApiId",
"=",
"api_id",
",",
"patchOperations",
"=",
"[",
"{",
"'op'",
":",
"'replace'",
",",
"'... | Add Rest API compression | [
"Add",
"Rest",
"API",
"compression"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1879-L1892 |
29,690 | Miserlou/Zappa | zappa/core.py | Zappa.get_api_keys | def get_api_keys(self, api_id, stage_name):
"""
Generator that allows to iterate per API keys associated to an api_id and a stage_name.
"""
response = self.apigateway_client.get_api_keys(limit=500)
stage_key = '{}/{}'.format(api_id, stage_name)
for api_key in response.get('items'):
if stage_key in api_key.get('stageKeys'):
yield api_key.get('id') | python | def get_api_keys(self, api_id, stage_name):
"""
Generator that allows to iterate per API keys associated to an api_id and a stage_name.
"""
response = self.apigateway_client.get_api_keys(limit=500)
stage_key = '{}/{}'.format(api_id, stage_name)
for api_key in response.get('items'):
if stage_key in api_key.get('stageKeys'):
yield api_key.get('id') | [
"def",
"get_api_keys",
"(",
"self",
",",
"api_id",
",",
"stage_name",
")",
":",
"response",
"=",
"self",
".",
"apigateway_client",
".",
"get_api_keys",
"(",
"limit",
"=",
"500",
")",
"stage_key",
"=",
"'{}/{}'",
".",
"format",
"(",
"api_id",
",",
"stage_na... | Generator that allows to iterate per API keys associated to an api_id and a stage_name. | [
"Generator",
"that",
"allows",
"to",
"iterate",
"per",
"API",
"keys",
"associated",
"to",
"an",
"api_id",
"and",
"a",
"stage_name",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1908-L1916 |
29,691 | Miserlou/Zappa | zappa/core.py | Zappa.create_api_key | def create_api_key(self, api_id, stage_name):
"""
Create new API key and link it with an api_id and a stage_name
"""
response = self.apigateway_client.create_api_key(
name='{}_{}'.format(stage_name, api_id),
description='Api Key for {}'.format(api_id),
enabled=True,
stageKeys=[
{
'restApiId': '{}'.format(api_id),
'stageName': '{}'.format(stage_name)
},
]
)
print('Created a new x-api-key: {}'.format(response['id'])) | python | def create_api_key(self, api_id, stage_name):
"""
Create new API key and link it with an api_id and a stage_name
"""
response = self.apigateway_client.create_api_key(
name='{}_{}'.format(stage_name, api_id),
description='Api Key for {}'.format(api_id),
enabled=True,
stageKeys=[
{
'restApiId': '{}'.format(api_id),
'stageName': '{}'.format(stage_name)
},
]
)
print('Created a new x-api-key: {}'.format(response['id'])) | [
"def",
"create_api_key",
"(",
"self",
",",
"api_id",
",",
"stage_name",
")",
":",
"response",
"=",
"self",
".",
"apigateway_client",
".",
"create_api_key",
"(",
"name",
"=",
"'{}_{}'",
".",
"format",
"(",
"stage_name",
",",
"api_id",
")",
",",
"description",... | Create new API key and link it with an api_id and a stage_name | [
"Create",
"new",
"API",
"key",
"and",
"link",
"it",
"with",
"an",
"api_id",
"and",
"a",
"stage_name"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1918-L1933 |
29,692 | Miserlou/Zappa | zappa/core.py | Zappa.remove_api_key | def remove_api_key(self, api_id, stage_name):
"""
Remove a generated API key for api_id and stage_name
"""
response = self.apigateway_client.get_api_keys(
limit=1,
nameQuery='{}_{}'.format(stage_name, api_id)
)
for api_key in response.get('items'):
self.apigateway_client.delete_api_key(
apiKey="{}".format(api_key['id'])
) | python | def remove_api_key(self, api_id, stage_name):
"""
Remove a generated API key for api_id and stage_name
"""
response = self.apigateway_client.get_api_keys(
limit=1,
nameQuery='{}_{}'.format(stage_name, api_id)
)
for api_key in response.get('items'):
self.apigateway_client.delete_api_key(
apiKey="{}".format(api_key['id'])
) | [
"def",
"remove_api_key",
"(",
"self",
",",
"api_id",
",",
"stage_name",
")",
":",
"response",
"=",
"self",
".",
"apigateway_client",
".",
"get_api_keys",
"(",
"limit",
"=",
"1",
",",
"nameQuery",
"=",
"'{}_{}'",
".",
"format",
"(",
"stage_name",
",",
"api_... | Remove a generated API key for api_id and stage_name | [
"Remove",
"a",
"generated",
"API",
"key",
"for",
"api_id",
"and",
"stage_name"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1935-L1946 |
29,693 | Miserlou/Zappa | zappa/core.py | Zappa.add_api_stage_to_api_key | def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
"""
Add api stage to Api key
"""
self.apigateway_client.update_api_key(
apiKey=api_key,
patchOperations=[
{
'op': 'add',
'path': '/stages',
'value': '{}/{}'.format(api_id, stage_name)
}
]
) | python | def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
"""
Add api stage to Api key
"""
self.apigateway_client.update_api_key(
apiKey=api_key,
patchOperations=[
{
'op': 'add',
'path': '/stages',
'value': '{}/{}'.format(api_id, stage_name)
}
]
) | [
"def",
"add_api_stage_to_api_key",
"(",
"self",
",",
"api_key",
",",
"api_id",
",",
"stage_name",
")",
":",
"self",
".",
"apigateway_client",
".",
"update_api_key",
"(",
"apiKey",
"=",
"api_key",
",",
"patchOperations",
"=",
"[",
"{",
"'op'",
":",
"'add'",
"... | Add api stage to Api key | [
"Add",
"api",
"stage",
"to",
"Api",
"key"
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1948-L1961 |
29,694 | Miserlou/Zappa | zappa/core.py | Zappa.get_patch_op | def get_patch_op(self, keypath, value, op='replace'):
"""
Return an object that describes a change of configuration on the given staging.
Setting will be applied on all available HTTP methods.
"""
if isinstance(value, bool):
value = str(value).lower()
return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value} | python | def get_patch_op(self, keypath, value, op='replace'):
"""
Return an object that describes a change of configuration on the given staging.
Setting will be applied on all available HTTP methods.
"""
if isinstance(value, bool):
value = str(value).lower()
return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value} | [
"def",
"get_patch_op",
"(",
"self",
",",
"keypath",
",",
"value",
",",
"op",
"=",
"'replace'",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
"return",
"{",
"'op'",... | Return an object that describes a change of configuration on the given staging.
Setting will be applied on all available HTTP methods. | [
"Return",
"an",
"object",
"that",
"describes",
"a",
"change",
"of",
"configuration",
"on",
"the",
"given",
"staging",
".",
"Setting",
"will",
"be",
"applied",
"on",
"all",
"available",
"HTTP",
"methods",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1963-L1970 |
29,695 | Miserlou/Zappa | zappa/core.py | Zappa.get_rest_apis | def get_rest_apis(self, project_name):
"""
Generator that allows to iterate per every available apis.
"""
all_apis = self.apigateway_client.get_rest_apis(
limit=500
)
for api in all_apis['items']:
if api['name'] != project_name:
continue
yield api | python | def get_rest_apis(self, project_name):
"""
Generator that allows to iterate per every available apis.
"""
all_apis = self.apigateway_client.get_rest_apis(
limit=500
)
for api in all_apis['items']:
if api['name'] != project_name:
continue
yield api | [
"def",
"get_rest_apis",
"(",
"self",
",",
"project_name",
")",
":",
"all_apis",
"=",
"self",
".",
"apigateway_client",
".",
"get_rest_apis",
"(",
"limit",
"=",
"500",
")",
"for",
"api",
"in",
"all_apis",
"[",
"'items'",
"]",
":",
"if",
"api",
"[",
"'name... | Generator that allows to iterate per every available apis. | [
"Generator",
"that",
"allows",
"to",
"iterate",
"per",
"every",
"available",
"apis",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1972-L1983 |
29,696 | Miserlou/Zappa | zappa/core.py | Zappa.undeploy_api_gateway | def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
"""
Delete a deployed REST API Gateway.
"""
print("Deleting API Gateway..")
api_id = self.get_api_id(lambda_name)
if domain_name:
# XXX - Remove Route53 smartly here?
# XXX - This doesn't raise, but doesn't work either.
try:
self.apigateway_client.delete_base_path_mapping(
domainName=domain_name,
basePath='(none)' if base_path is None else base_path
)
except Exception as e:
# We may not have actually set up the domain.
pass
was_deleted = self.delete_stack(lambda_name, wait=True)
if not was_deleted:
# try erasing it with the older method
for api in self.get_rest_apis(lambda_name):
self.apigateway_client.delete_rest_api(
restApiId=api['id']
) | python | def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
"""
Delete a deployed REST API Gateway.
"""
print("Deleting API Gateway..")
api_id = self.get_api_id(lambda_name)
if domain_name:
# XXX - Remove Route53 smartly here?
# XXX - This doesn't raise, but doesn't work either.
try:
self.apigateway_client.delete_base_path_mapping(
domainName=domain_name,
basePath='(none)' if base_path is None else base_path
)
except Exception as e:
# We may not have actually set up the domain.
pass
was_deleted = self.delete_stack(lambda_name, wait=True)
if not was_deleted:
# try erasing it with the older method
for api in self.get_rest_apis(lambda_name):
self.apigateway_client.delete_rest_api(
restApiId=api['id']
) | [
"def",
"undeploy_api_gateway",
"(",
"self",
",",
"lambda_name",
",",
"domain_name",
"=",
"None",
",",
"base_path",
"=",
"None",
")",
":",
"print",
"(",
"\"Deleting API Gateway..\"",
")",
"api_id",
"=",
"self",
".",
"get_api_id",
"(",
"lambda_name",
")",
"if",
... | Delete a deployed REST API Gateway. | [
"Delete",
"a",
"deployed",
"REST",
"API",
"Gateway",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1985-L2014 |
29,697 | Miserlou/Zappa | zappa/core.py | Zappa.update_stage_config | def update_stage_config( self,
project_name,
stage_name,
cloudwatch_log_level,
cloudwatch_data_trace,
cloudwatch_metrics_enabled
):
"""
Update CloudWatch metrics configuration.
"""
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = 'OFF'
for api in self.get_rest_apis(project_name):
self.apigateway_client.update_stage(
restApiId=api['id'],
stageName=stage_name,
patchOperations=[
self.get_patch_op('logging/loglevel', cloudwatch_log_level),
self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
]
) | python | def update_stage_config( self,
project_name,
stage_name,
cloudwatch_log_level,
cloudwatch_data_trace,
cloudwatch_metrics_enabled
):
"""
Update CloudWatch metrics configuration.
"""
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = 'OFF'
for api in self.get_rest_apis(project_name):
self.apigateway_client.update_stage(
restApiId=api['id'],
stageName=stage_name,
patchOperations=[
self.get_patch_op('logging/loglevel', cloudwatch_log_level),
self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
]
) | [
"def",
"update_stage_config",
"(",
"self",
",",
"project_name",
",",
"stage_name",
",",
"cloudwatch_log_level",
",",
"cloudwatch_data_trace",
",",
"cloudwatch_metrics_enabled",
")",
":",
"if",
"cloudwatch_log_level",
"not",
"in",
"self",
".",
"cloudwatch_log_levels",
":... | Update CloudWatch metrics configuration. | [
"Update",
"CloudWatch",
"metrics",
"configuration",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L2016-L2038 |
29,698 | Miserlou/Zappa | zappa/core.py | Zappa.delete_stack | def delete_stack(self, name, wait=False):
"""
Delete the CF stack managed by Zappa.
"""
try:
stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
except: # pragma: no cover
print('No Zappa stack named {0}'.format(name))
return False
tags = {x['Key']:x['Value'] for x in stack['Tags']}
if tags.get('ZappaProject') == name:
self.cf_client.delete_stack(StackName=name)
if wait:
waiter = self.cf_client.get_waiter('stack_delete_complete')
print('Waiting for stack {0} to be deleted..'.format(name))
waiter.wait(StackName=name)
return True
else:
print('ZappaProject tag not found on {0}, doing nothing'.format(name))
return False | python | def delete_stack(self, name, wait=False):
"""
Delete the CF stack managed by Zappa.
"""
try:
stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
except: # pragma: no cover
print('No Zappa stack named {0}'.format(name))
return False
tags = {x['Key']:x['Value'] for x in stack['Tags']}
if tags.get('ZappaProject') == name:
self.cf_client.delete_stack(StackName=name)
if wait:
waiter = self.cf_client.get_waiter('stack_delete_complete')
print('Waiting for stack {0} to be deleted..'.format(name))
waiter.wait(StackName=name)
return True
else:
print('ZappaProject tag not found on {0}, doing nothing'.format(name))
return False | [
"def",
"delete_stack",
"(",
"self",
",",
"name",
",",
"wait",
"=",
"False",
")",
":",
"try",
":",
"stack",
"=",
"self",
".",
"cf_client",
".",
"describe_stacks",
"(",
"StackName",
"=",
"name",
")",
"[",
"'Stacks'",
"]",
"[",
"0",
"]",
"except",
":",
... | Delete the CF stack managed by Zappa. | [
"Delete",
"the",
"CF",
"stack",
"managed",
"by",
"Zappa",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L2076-L2096 |
29,699 | Miserlou/Zappa | zappa/core.py | Zappa.create_stack_template | def create_stack_template( self,
lambda_arn,
lambda_name,
api_key_required,
iam_authorization,
authorizer,
cors_options=None,
description=None,
endpoint_configuration=None
):
"""
Build the entire CF stack.
Just used for the API Gateway, but could be expanded in the future.
"""
auth_type = "NONE"
if iam_authorization and authorizer:
logger.warn("Both IAM Authorization and Authorizer are specified, this is not possible. "
"Setting Auth method to IAM Authorization")
authorizer = None
auth_type = "AWS_IAM"
elif iam_authorization:
auth_type = "AWS_IAM"
elif authorizer:
auth_type = authorizer.get("type", "CUSTOM")
# build a fresh template
self.cf_template = troposphere.Template()
self.cf_template.add_description('Automatically generated with Zappa')
self.cf_api_resources = []
self.cf_parameters = {}
restapi = self.create_api_gateway_routes(
lambda_arn,
api_name=lambda_name,
api_key_required=api_key_required,
authorization_type=auth_type,
authorizer=authorizer,
cors_options=cors_options,
description=description,
endpoint_configuration=endpoint_configuration
)
return self.cf_template | python | def create_stack_template( self,
lambda_arn,
lambda_name,
api_key_required,
iam_authorization,
authorizer,
cors_options=None,
description=None,
endpoint_configuration=None
):
"""
Build the entire CF stack.
Just used for the API Gateway, but could be expanded in the future.
"""
auth_type = "NONE"
if iam_authorization and authorizer:
logger.warn("Both IAM Authorization and Authorizer are specified, this is not possible. "
"Setting Auth method to IAM Authorization")
authorizer = None
auth_type = "AWS_IAM"
elif iam_authorization:
auth_type = "AWS_IAM"
elif authorizer:
auth_type = authorizer.get("type", "CUSTOM")
# build a fresh template
self.cf_template = troposphere.Template()
self.cf_template.add_description('Automatically generated with Zappa')
self.cf_api_resources = []
self.cf_parameters = {}
restapi = self.create_api_gateway_routes(
lambda_arn,
api_name=lambda_name,
api_key_required=api_key_required,
authorization_type=auth_type,
authorizer=authorizer,
cors_options=cors_options,
description=description,
endpoint_configuration=endpoint_configuration
)
return self.cf_template | [
"def",
"create_stack_template",
"(",
"self",
",",
"lambda_arn",
",",
"lambda_name",
",",
"api_key_required",
",",
"iam_authorization",
",",
"authorizer",
",",
"cors_options",
"=",
"None",
",",
"description",
"=",
"None",
",",
"endpoint_configuration",
"=",
"None",
... | Build the entire CF stack.
Just used for the API Gateway, but could be expanded in the future. | [
"Build",
"the",
"entire",
"CF",
"stack",
".",
"Just",
"used",
"for",
"the",
"API",
"Gateway",
"but",
"could",
"be",
"expanded",
"in",
"the",
"future",
"."
] | 3ccf7490a8d8b8fa74a61ee39bf44234f3567739 | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L2098-L2140 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.