Dataset schema, one record per row:

| column | type | range / notes |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | 7 to 55 chars |
| path | string | 4 to 127 chars |
| func_name | string | 1 to 88 chars |
| original_string | string | 75 to 19.8k chars |
| language | string | 1 class (python) |
| code | string | 75 to 19.8k chars; duplicates original_string |
| code_tokens | list | tokenization of code |
| docstring | string | 3 to 17.3k chars |
| docstring_tokens | list | tokenization of the docstring's first sentence |
| sha | string | 40 chars, commit hash |
| url | string | 87 to 242 chars, GitHub permalink |
id: 15,200 | repo: jeongyoonlee/Kaggler | path: kaggler/model/nn.py | func_name: NN.fprime | language: python

```python
def fprime(self, w, *args):
    """Return the derivatives of the cost function for predictions.

    Args:
        w (array of float): weight vectors such that:
            w[:-h1] -- weights between the input and h layers
            w[-h1:] -- weights between the h and output layers
        args: features (args[0]) and target (args[1])

    Returns:
        gradients of the cost function for predictions
    """
    x0 = args[0]
    x1 = args[1]
    n0 = x0.shape[0]
    n1 = x1.shape[0]

    # n -- number of pairs to evaluate
    n = max(n0, n1) * 10
    idx0 = np.random.choice(range(n0), size=n)
    idx1 = np.random.choice(range(n1), size=n)

    # b -- bias for the input and h layers
    b = np.ones((n, 1))

    i1 = self.i + 1
    h = self.h
    h1 = h + 1
    w2 = w[-h1:].reshape(h1, 1)
    w1 = w[:-h1].reshape(i1, h)

    if sparse.issparse(x0):
        x0 = x0.tocsr()[idx0]
        x1 = x1.tocsr()[idx1]
        xb0 = sparse.hstack((x0, b))
        xb1 = sparse.hstack((x1, b))
    else:
        x0 = x0[idx0]
        x1 = x1[idx1]
        xb0 = np.hstack((x0, b))
        xb1 = np.hstack((x1, b))

    z0 = np.hstack((sigm(xb0.dot(w1)), b))
    z1 = np.hstack((sigm(xb1.dot(w1)), b))
    y0 = z0.dot(w2)
    y1 = z1.dot(w2)

    #e = 1 - sigm(y1 - y0)
    #dy = e * dsigm(y1 - y0)
    e = 1 - (y1 - y0)
    dy = e / n

    # Calculate the derivative of the cost function w.r.t. F and w2 where:
    # F -- weights between the input and h layers
    # w2 -- weights between the h and output layers
    dw1 = -(xb1.T.dot(dy.dot(w2[:-1].reshape(1, h)) * dsigm(xb1.dot(w1))) -
            xb0.T.dot(dy.dot(w2[:-1].reshape(1, h)) * dsigm(xb0.dot(w1)))
            ).reshape(i1 * h) + self.l1 * w[:-h1] / (i1 * h)
    dw2 = -(z1 - z0).T.dot(dy).reshape(h1) + self.l2 * w[-h1:] / h1

    return np.append(dw1, dw2)
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/model/nn.py#L258-L320
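
fprime above samples index pairs from the two feature sets and differentiates a pairwise cost through one hidden layer, relying on the helpers sigm and dsigm. Assuming sigm is the logistic sigmoid and dsigm its derivative (which is what the usage implies), here is a minimal finite-difference check of the kind used to validate hand-written gradients like this one:

```python
import numpy as np

def sigm(x):
    # logistic sigmoid, assumed shape of the helper used by fprime
    return 1. / (1. + np.exp(-x))

def dsigm(x):
    # analytic derivative of the sigmoid
    return sigm(x) * (1. - sigm(x))

# Central finite difference should match the analytic derivative closely.
x = np.linspace(-3, 3, 7)
h = 1e-6
numeric = (sigm(x + h) - sigm(x - h)) / (2 * h)
assert np.allclose(dsigm(x), numeric, atol=1e-8)
print("sigmoid gradient OK")
```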
id: 15,201 | repo: jeongyoonlee/Kaggler | path: kaggler/preprocessing/data.py | func_name: Normalizer._transform_col | language: python

```python
def _transform_col(self, x, col):
    """Normalize one numerical column.

    Args:
        x (numpy.array): a numerical column to normalize
        col (int): column index

    Returns:
        A normalized feature vector.
    """
    return norm.ppf(self.ecdfs[col](x) * .998 + .001)
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L66-L77
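
This is a rank-based Gaussianization: each value goes through the column's empirical CDF (fitted elsewhere and stored in self.ecdfs), and the result is squeezed by `* .998 + .001` so it stays strictly inside (0, 1) before norm.ppf inverts the normal CDF. A standalone sketch of the same idea with an inline ECDF, assuming only scipy:

```python
import numpy as np
from scipy.stats import norm, rankdata

x = np.array([1.0, 5.0, 2.0, 100.0, 3.0])

# Empirical CDF via ranks: rank / n lies in (0, 1].
ecdf = rankdata(x) / len(x)

# Squeeze into (0, 1) as _transform_col does, then invert the normal CDF.
z = norm.ppf(ecdf * .998 + .001)
print(z)  # roughly normal scores; the outlier 100.0 maps to a bounded value
```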
id: 15,202 | repo: jeongyoonlee/Kaggler | path: kaggler/preprocessing/data.py | func_name: LabelEncoder._get_label_encoder_and_max | language: python

```python
def _get_label_encoder_and_max(self, x):
    """Return a mapping from values and its maximum of a column to integer labels.

    Args:
        x (pandas.Series): a categorical column to encode.

    Returns:
        label_encoder (dict): mapping from values of features to integers
        max_label (int): maximum label
    """
    # NaN cannot be used as a key for dict. So replace it with a random integer.
    label_count = x.fillna(NAN_INT).value_counts()
    n_uniq = label_count.shape[0]

    label_count = label_count[label_count >= self.min_obs]
    n_uniq_new = label_count.shape[0]

    # If every label appears more than min_obs, new label starts from 0.
    # Otherwise, new label starts from 1 and 0 is used for all old labels
    # that appear less than min_obs.
    offset = 0 if n_uniq == n_uniq_new else 1

    label_encoder = pd.Series(np.arange(n_uniq_new) + offset, index=label_count.index)
    max_label = label_encoder.max()
    label_encoder = label_encoder.to_dict()

    return label_encoder, max_label
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L101-L128
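
The min_obs threshold decides the offset: if any category is rare, frequent categories are numbered from 1 and the rare ones later collapse to label 0. A standalone sketch of the same counting logic; NAN_INT here is a stand-in for Kaggler's module constant, and min_obs is assumed to be 2:

```python
import numpy as np
import pandas as pd

NAN_INT = 7535805  # stand-in for Kaggler's NAN_INT module constant
min_obs = 2

x = pd.Series(['a', 'b', 'a', 'c', np.nan, 'a', 'b'])

label_count = x.fillna(NAN_INT).value_counts()
n_uniq = label_count.shape[0]                      # 4: a, b, c, NaN
label_count = label_count[label_count >= min_obs]  # keeps a (3) and b (2)
n_uniq_new = label_count.shape[0]

# Rare labels exist, so frequent labels start at 1 and 0 is reserved for them.
offset = 0 if n_uniq == n_uniq_new else 1
label_encoder = pd.Series(np.arange(n_uniq_new) + offset, index=label_count.index)
print(label_encoder.to_dict())  # {'a': 1, 'b': 2}
print(label_encoder.max())      # 2
```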
id: 15,203 | repo: jeongyoonlee/Kaggler | path: kaggler/preprocessing/data.py | func_name: LabelEncoder._transform_col | language: python

```python
def _transform_col(self, x, i):
    """Encode one categorical column into labels.

    Args:
        x (pandas.Series): a categorical column to encode
        i (int): column index

    Returns:
        x (pandas.Series): a column with labels.
    """
    return x.fillna(NAN_INT).map(self.label_encoders[i]).fillna(0)
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L130-L140
id: 15,204 | repo: jeongyoonlee/Kaggler | path: kaggler/preprocessing/data.py | func_name: OneHotEncoder._transform_col | language: python

```python
def _transform_col(self, x, i):
    """Encode one categorical column into sparse matrix with one-hot-encoding.

    Args:
        x (pandas.Series): a categorical column to encode
        i (int): column index

    Returns:
        X (scipy.sparse.coo_matrix): sparse matrix encoding a categorical
                                     variable into dummy variables
    """
    labels = self.label_encoder._transform_col(x, i)
    label_max = self.label_encoder.label_maxes[i]

    # build row and column index for non-zero values of a sparse matrix
    index = np.array(range(len(labels)))
    i = index[labels > 0]
    j = labels[labels > 0] - 1  # column index starts from 0

    if len(i) > 0:
        return sparse.coo_matrix((np.ones_like(i), (i, j)),
                                 shape=(x.shape[0], label_max))
    else:
        # if there is no non-zero value, return no matrix
        return None
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L212-L237
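
Only rows with a positive label contribute a 1, at column label - 1; label 0 (the rare or unseen bucket) gets no dummy column, which is why an all-zero column yields None. A minimal standalone sketch of the same coo_matrix construction from an assumed label vector:

```python
import numpy as np
from scipy import sparse

labels = np.array([1, 0, 2, 1])  # 0 = rare/unknown, gets no dummy column
label_max = 2

index = np.arange(len(labels))
i = index[labels > 0]        # rows with a real label: 0, 2, 3
j = labels[labels > 0] - 1   # columns: 0, 1, 0

X = sparse.coo_matrix((np.ones_like(i), (i, j)), shape=(len(labels), label_max))
print(X.toarray())
# [[1 0]
#  [0 0]
#  [0 1]
#  [1 0]]
```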
id: 15,205 | repo: jeongyoonlee/Kaggler | path: kaggler/preprocessing/data.py | func_name: OneHotEncoder.transform | language: python

```python
def transform(self, X):
    """Encode categorical columns into sparse matrix with one-hot-encoding.

    Args:
        X (pandas.DataFrame): categorical columns to encode

    Returns:
        X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
                                         variables into dummy variables
    """
    for i, col in enumerate(X.columns):
        X_col = self._transform_col(X[col], i)
        # Note: if the first column returns None, nothing ever assigns X_new
        # for i == 0, so the final return raises NameError (latent bug).
        if X_col is not None:
            if i == 0:
                X_new = X_col
            else:
                X_new = sparse.hstack((X_new, X_col))

        logger.debug('{} --> {} features'.format(
            col, self.label_encoder.label_maxes[i])
        )

    return X_new
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/preprocessing/data.py#L244-L267
id: 15,206 | repo: jeongyoonlee/Kaggler | path: kaggler/online_model/DecisionTree/OnlineClassificationTree.py | func_name: ClassificationTree.predict | language: python

```python
def predict(self, x):
    """
    Make prediction recursively. Use both the samples inside the current
    node and the statistics inherited from parent.
    """
    if self._is_leaf():
        d1 = self.predict_initialize['count_dict']
        d2 = count_dict(self.Y)
        # Merge the inherited class counts into the current node's counts.
        # Note: iteritems() is Python 2 only; on Python 3 use d1.items().
        for key, value in d1.iteritems():
            if key in d2:
                d2[key] += value
            else:
                d2[key] = value
        return argmax(d2)
    else:
        if self.criterion(x):
            return self.right.predict(x)
        else:
            return self.left.predict(x)
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/online_model/DecisionTree/OnlineClassificationTree.py#L74-L92
id: 15,207 | repo: jeongyoonlee/Kaggler | path: kaggler/ensemble/linear.py | func_name: netflix | language: python

```python
def netflix(es, ps, e0, l=.0001):
    """
    Combine predictions with the optimal weights to minimize RMSE.

    Args:
        es (list of float): RMSEs of predictions
        ps (list of np.array): predictions
        e0 (float): RMSE of all zero prediction
        l (float): lambda as in the ridge regression

    Returns:
        Ensemble prediction (np.array) and weights (np.array) for input predictions
    """
    m = len(es)
    n = len(ps[0])

    X = np.stack(ps).T
    pTy = .5 * (n * e0**2 + (X**2).sum(axis=0) - n * np.array(es)**2)

    w = np.linalg.pinv(X.T.dot(X) + l * n * np.eye(m)).dot(pTy)

    return X.dot(w), w
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/ensemble/linear.py#L7-L28
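
The trick in netflix is that the cross term X^T y needed by ridge regression is recoverable from RMSEs alone: for any prediction p, p.y = (||p||^2 + ||y||^2 - ||p - y||^2) / 2, and ||y||^2 = n * e0^2 comes from the RMSE of the all-zero prediction, so the blend can be fitted without access to y itself (e.g. from leaderboard scores). A quick self-check, assuming the module path from this row:

```python
import numpy as np
from kaggler.ensemble.linear import netflix

rng = np.random.RandomState(42)
y = rng.rand(1000)
p1 = y + rng.normal(0, .1, 1000)   # a good prediction
p2 = y + rng.normal(0, .5, 1000)   # a noisy one

rmse = lambda p: np.sqrt(np.mean((p - y) ** 2))
es = [rmse(p1), rmse(p2)]
e0 = rmse(np.zeros(1000))          # RMSE of the all-zero prediction

p_ens, w = netflix(es, [p1, p2], e0)
print(w)            # most weight lands on the less noisy p1
print(rmse(p_ens))  # typically at or below min(es)
```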
id: 15,208 | repo: jeongyoonlee/Kaggler | path: kaggler/data_io.py | func_name: save_data | language: python

```python
def save_data(X, y, path):
    """Save data as a CSV, LibSVM or HDF5 file based on the file extension.

    Args:
        X (numpy or scipy sparse matrix): Data matrix
        y (numpy array): Target vector. If None, all zero vector will be saved.
        path (str): Path to the CSV, LibSVM or HDF5 file to save data.
    """
    catalog = {'.csv': save_csv, '.sps': save_libsvm, '.h5': save_hdf5}
    ext = os.path.splitext(path)[1]
    func = catalog[ext]

    if y is None:
        y = np.zeros((X.shape[0], ))

    func(X, y, path)
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L34-L50
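
save_data dispatches on the file extension (.csv, .sps for LibSVM, .h5 for HDF5), and load_data (row 15,212 below) mirrors the dispatch. A round-trip sketch, assuming the module import path from this row:

```python
import numpy as np
from kaggler.data_io import save_data, load_data

X = np.random.rand(100, 5)
y = (np.random.rand(100) > .5).astype(float)

save_data(X, y, 'train.h5')      # the .h5 extension selects save_hdf5
X2, y2 = load_data('train.h5')   # same extension-based dispatch on load

assert np.allclose(X, X2) and np.allclose(y, y2)
```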
id: 15,209 | repo: jeongyoonlee/Kaggler | path: kaggler/data_io.py | func_name: save_csv | language: python

```python
def save_csv(X, y, path):
    """Save data as a CSV file.

    Args:
        X (numpy or scipy sparse matrix): Data matrix
        y (numpy array): Target vector.
        path (str): Path to the CSV file to save data.
    """
    if sparse.issparse(X):
        X = X.todense()

    np.savetxt(path, np.hstack((y.reshape((-1, 1)), X)), delimiter=',')
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L53-L65
id: 15,210 | repo: jeongyoonlee/Kaggler | path: kaggler/data_io.py | func_name: save_libsvm | language: python

```python
def save_libsvm(X, y, path):
    """Save data as a LibSVM file.

    Args:
        X (numpy or scipy sparse matrix): Data matrix
        y (numpy array): Target vector.
        path (str): Path to the LibSVM file to save data.
    """
    dump_svmlight_file(X, y, path, zero_based=False)
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L68-L77
id: 15,211 | repo: jeongyoonlee/Kaggler | path: kaggler/data_io.py | func_name: save_hdf5 | language: python

```python
def save_hdf5(X, y, path):
    """Save data as a HDF5 file.

    Args:
        X (numpy or scipy sparse matrix): Data matrix
        y (numpy array): Target vector.
        path (str): Path to the HDF5 file to save data.
    """
    with h5py.File(path, 'w') as f:
        is_sparse = 1 if sparse.issparse(X) else 0
        f['issparse'] = is_sparse
        f['target'] = y

        if is_sparse:
            if not sparse.isspmatrix_csr(X):
                X = X.tocsr()

            f['shape'] = np.array(X.shape)
            f['data'] = X.data
            f['indices'] = X.indices
            f['indptr'] = X.indptr
        else:
            f['data'] = X
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L80-L103
id: 15,212 | repo: jeongyoonlee/Kaggler | path: kaggler/data_io.py | func_name: load_data | language: python

```python
def load_data(path, dense=False):
    """Load data from a CSV, LibSVM or HDF5 file based on the file extension.

    Args:
        path (str): A path to the CSV, LibSVM or HDF5 format file containing data.
        dense (boolean): An optional variable indicating if the return matrix
                         should be dense. By default, it is false.

    Returns:
        Data matrix X and target vector y
    """
    catalog = {'.csv': load_csv, '.sps': load_svmlight_file, '.h5': load_hdf5}
    ext = os.path.splitext(path)[1]
    func = catalog[ext]
    X, y = func(path)

    if dense and sparse.issparse(X):
        X = X.todense()

    return X, y
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L106-L127
id: 15,213 | repo: jeongyoonlee/Kaggler | path: kaggler/data_io.py | func_name: load_csv | language: python

```python
def load_csv(path):
    """Load data from a CSV file.

    Args:
        path (str): A path to the CSV format file containing data.

    Returns:
        Data matrix X and target vector y
    """
    # Peek at the first line to decide whether a header row must be skipped.
    with open(path) as f:
        line = f.readline().strip()

    X = np.loadtxt(path, delimiter=',',
                   skiprows=0 if is_number(line.split(',')[0]) else 1)
    y = np.array(X[:, 0]).flatten()
    X = X[:, 1:]

    return X, y
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L130-L151
id: 15,214 | repo: jeongyoonlee/Kaggler | path: kaggler/data_io.py | func_name: load_hdf5 | language: python

```python
def load_hdf5(path):
    """Load data from a HDF5 file.

    Args:
        path (str): A path to the HDF5 format file containing data.

    Returns:
        Data matrix X and target vector y
    """
    with h5py.File(path, 'r') as f:
        is_sparse = f['issparse'][...]

        if is_sparse:
            # Rebuild the CSR matrix from the components save_hdf5 wrote.
            shape = tuple(f['shape'][...])
            data = f['data'][...]
            indices = f['indices'][...]
            indptr = f['indptr'][...]
            X = sparse.csr_matrix((data, indices, indptr), shape=shape)
        else:
            X = f['data'][...]

        y = f['target'][...]

    return X, y
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L154-L179
id: 15,215 | repo: jeongyoonlee/Kaggler | path: kaggler/data_io.py | func_name: read_sps | language: python

```python
def read_sps(path):
    """Read a LibSVM file line-by-line.

    Args:
        path (str): A path to the LibSVM file to read.

    Yields:
        data (list) and target (int).
    """
    for line in open(path):
        # parse x
        xs = line.rstrip().split(' ')
        yield xs[1:], int(xs[0])
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/data_io.py#L182-L196
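
Each LibSVM line has the form `<label> <index>:<value> ...`; read_sps yields the raw feature tokens and the integer label, leaving index:value parsing to the caller. A usage sketch with an inline sample file:

```python
from kaggler.data_io import read_sps

# A 2-line LibSVM file: "<label> <index>:<value> ..."
with open('sample.sps', 'w') as f:
    f.write('1 1:0.5 3:1.2\n0 2:0.7\n')

for xs, y in read_sps('sample.sps'):
    # xs is the list of raw "index:value" tokens; y is the integer label
    features = dict(tok.split(':') for tok in xs)
    print(y, features)
# 1 {'1': '0.5', '3': '1.2'}
# 0 {'2': '0.7'}
```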
id: 15,216 | repo: jeongyoonlee/Kaggler | path: kaggler/metrics/regression.py | func_name: gini | language: python

```python
def gini(y, p):
    """Normalized Gini Coefficient.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        e (numpy.float64): normalized Gini coefficient
    """
    # check and get number of samples
    assert y.shape == p.shape
    n_samples = y.shape[0]

    # sort rows on prediction column (from largest to smallest)
    arr = np.array([y, p]).transpose()
    true_order = arr[arr[:, 0].argsort()][::-1, 0]
    pred_order = arr[arr[:, 1].argsort()][::-1, 0]

    # get Lorenz curves
    # (1/n_samples relies on true division: Python 3, or a __future__ import)
    l_true = np.cumsum(true_order) / np.sum(true_order)
    l_pred = np.cumsum(pred_order) / np.sum(pred_order)
    l_ones = np.linspace(1/n_samples, 1, n_samples)

    # get Gini coefficients (area between curves)
    g_true = np.sum(l_ones - l_true)
    g_pred = np.sum(l_ones - l_pred)

    # normalize to true Gini coefficient
    return g_pred / g_true
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/metrics/regression.py#L46-L78
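
gini normalizes the prediction's Gini sum by that of a perfect ordering, so an exact ranking scores 1 and an exactly reversed ranking scores -1. A quick check, assuming the import path from this row (the printed values were worked out by hand from the formula):

```python
import numpy as np
from kaggler.metrics.regression import gini

y = np.array([0., 1., 2., 3., 4.])

print(gini(y, y))                               # 1.0: perfect ranking
print(gini(y, y[::-1]))                         # -1.0: fully reversed ranking
print(gini(y, np.array([0., 2., 1., 3., 4.])))  # 0.9: one adjacent pair swapped
```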
id: 15,217 | repo: jeongyoonlee/Kaggler | path: kaggler/metrics/classification.py | func_name: logloss | language: python

```python
def logloss(y, p):
    """Bounded log loss error.

    Args:
        y (numpy.array): target
        p (numpy.array): prediction

    Returns:
        bounded log loss error
    """
    # clip to [EPS, 1 - EPS] in place -- note this mutates the caller's array
    p[p < EPS] = EPS
    p[p > 1 - EPS] = 1 - EPS
    return log_loss(y, p)
```
sha: 20661105b61958dc9a3c529c1d3b2313ab23ae32 | url: https://github.com/jeongyoonlee/Kaggler/blob/20661105b61958dc9a3c529c1d3b2313ab23ae32/kaggler/metrics/classification.py#L10-L23
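
Clipping keeps a single confidently wrong prediction from sending the loss to infinity. A small demonstration of the same bounding, assuming EPS = 1e-15 (a typical choice; Kaggler's actual constant is defined elsewhere in the package):

```python
import numpy as np
from sklearn.metrics import log_loss

EPS = 1e-15  # assumed stand-in for Kaggler's EPS constant

y = np.array([1, 0])
p = np.array([0.0, 1.0])   # confidently wrong on both samples

p[p < EPS] = EPS
p[p > 1 - EPS] = 1 - EPS   # in-place clip, exactly as logloss() does
print(log_loss(y, p))      # ~34.5 rather than inf
```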
id: 15,218 | repo: vividvilla/csvtotable | path: csvtotable/convert.py | func_name: convert | language: python

```python
def convert(input_file_name, **kwargs):
    """Convert CSV file to HTML table"""
    delimiter = kwargs["delimiter"] or ","
    quotechar = kwargs["quotechar"] or "|"

    if six.PY2:
        delimiter = delimiter.encode("utf-8")
        quotechar = quotechar.encode("utf-8")

    # Read CSV and form a header and rows list
    with open(input_file_name, "rb") as input_file:
        # Note: this reader takes an encoding argument, so `csv` here is
        # likely the unicodecsv package; the stdlib csv.reader does not.
        reader = csv.reader(input_file,
                            encoding="utf-8",
                            delimiter=delimiter,
                            quotechar=quotechar)

        csv_headers = []
        if not kwargs.get("no_header"):
            # Read header from first line
            csv_headers = next(reader)

        csv_rows = [row for row in reader if row]

        # Set default column name if header is not present
        if not csv_headers and len(csv_rows) > 0:
            end = len(csv_rows[0]) + 1
            csv_headers = ["Column {}".format(n) for n in range(1, end)]

    # Render csv to HTML
    html = render_template(csv_headers, csv_rows, **kwargs)

    # Freeze all JS files in template
    return freeze_js(html)
```
sha: d894dca1fcc1071c9a52260a9194f8cc3b327905 | url: https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L36-L68
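
delimiter and quotechar are read with kwargs[...] rather than kwargs.get(...), so both keys must be present even if None. A hedged usage sketch; the keyword values here are illustrative, not required by the API:

```python
from csvtotable.convert import convert, save

# delimiter and quotechar must be passed even as None, which falls back
# to "," and "|" respectively via the `or` defaults above.
html = convert("data.csv", delimiter=None, quotechar=None,
               caption="My table", pagination=True)
save("data.html", html)
```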
id: 15,219 | repo: vividvilla/csvtotable | path: csvtotable/convert.py | func_name: save | language: python

```python
def save(file_name, content):
    """Save content to a file"""
    # open() here must accept an encoding argument (Python 3 builtin open,
    # or io.open imported as open on Python 2).
    with open(file_name, "w", encoding="utf-8") as output_file:
        output_file.write(content)
        return output_file.name
```
sha: d894dca1fcc1071c9a52260a9194f8cc3b327905 | url: https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L71-L75
id: 15,220 | repo: vividvilla/csvtotable | path: csvtotable/convert.py | func_name: serve | language: python

```python
def serve(content):
    """Write content to a temp file and serve it in browser"""
    temp_folder = tempfile.gettempdir()
    temp_file_name = tempfile.gettempprefix() + str(uuid.uuid4()) + ".html"
    # Generate a file path with a random name in temporary dir
    temp_file_path = os.path.join(temp_folder, temp_file_name)

    # save content to temp file
    save(temp_file_path, content)

    # Open temp file in a browser
    webbrowser.open("file://{}".format(temp_file_path))

    # Block the thread while content is served
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        # cleanup the temp file
        os.remove(temp_file_path)
```
sha: d894dca1fcc1071c9a52260a9194f8cc3b327905 | url: https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L78-L97
id: 15,221 | repo: vividvilla/csvtotable | path: csvtotable/convert.py | func_name: render_template | language: python

```python
def render_template(table_headers, table_items, **options):
    """
    Render Jinja2 template
    """
    caption = options.get("caption") or "Table"
    display_length = options.get("display_length") or -1
    height = options.get("height") or "70vh"
    default_length_menu = [-1, 10, 25, 50]
    pagination = options.get("pagination")
    virtual_scroll_limit = options.get("virtual_scroll")

    # Change % to vh
    height = height.replace("%", "vh")

    # Header columns
    columns = []
    for header in table_headers:
        columns.append({"title": header})

    # Data table options
    datatable_options = {
        "columns": columns,
        "data": table_items,
        "iDisplayLength": display_length,
        "sScrollX": "100%",
        "sScrollXInner": "100%"
    }

    # Enable virtual scroll for rows bigger than 1000 rows
    is_paging = pagination
    virtual_scroll = False
    scroll_y = height

    if virtual_scroll_limit:
        if virtual_scroll_limit != -1 and len(table_items) > virtual_scroll_limit:
            virtual_scroll = True
            display_length = -1

            fmt = ("\nVirtual scroll is enabled since number of rows exceeds {limit}."
                   " You can set custom row limit by setting flag -vs, --virtual-scroll."
                   " Virtual scroll can be disabled by setting the value to -1 and set it to 0 to always enable.")
            logger.warn(fmt.format(limit=virtual_scroll_limit))

            if not is_paging:
                fmt = "\nPagination can not be disabled in virtual scroll mode."
                logger.warn(fmt)
                is_paging = True

    if is_paging and not virtual_scroll:
        # Add display length to the default display length menu
        length_menu = []
        if display_length != -1:
            length_menu = sorted(default_length_menu + [display_length])
        else:
            length_menu = default_length_menu

        # Set label as "All" if display length is -1
        length_menu_label = [str("All") if i == -1 else i for i in length_menu]
        datatable_options["lengthMenu"] = [length_menu, length_menu_label]
        datatable_options["iDisplayLength"] = display_length

    if is_paging:
        datatable_options["paging"] = True
    else:
        datatable_options["paging"] = False

    if scroll_y:
        datatable_options["scrollY"] = scroll_y

    if virtual_scroll:
        datatable_options["scroller"] = True
        datatable_options["bPaginate"] = False
        datatable_options["deferRender"] = True
        datatable_options["bLengthChange"] = False

    enable_export = options.get("export")
    if enable_export:
        if options["export_options"]:
            allowed = list(options["export_options"])
        else:
            allowed = ["copy", "csv", "json", "print"]
        datatable_options["dom"] = "Bfrtip"
        datatable_options["buttons"] = allowed

    datatable_options_json = json.dumps(datatable_options,
                                        separators=(",", ":"))

    return template.render(title=caption or "Table",
                           caption=caption,
                           datatable_options=datatable_options_json,
                           virtual_scroll=virtual_scroll,
                           enable_export=enable_export)
```
d894dca1fcc1071c9a52260a9194f8cc3b327905
|
https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L100-L193
|
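A hedged usage sketch for the render_template record above; the sample headers and rows are illustrative and not taken from the dataset:

import json

# Compact separators, as in the code above, strip the whitespace that
# json.dumps would otherwise insert between items and around colons.
datatable_options = {
    "columns": [{"title": "Name"}, {"title": "City"}],
    "data": [["Alice", "Oslo"], ["Bob", "Lima"]],
    "iDisplayLength": -1,
}
print(json.dumps(datatable_options, separators=(",", ":")))
# -> {"columns":[{"title":"Name"},{"title":"City"}],"data":[["Alice","Oslo"],["Bob","Lima"]],"iDisplayLength":-1}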
15,222
|
vividvilla/csvtotable
|
csvtotable/convert.py
|
freeze_js
|
def freeze_js(html):
"""
Freeze all JS assets to the rendered html itself.
"""
matches = js_src_pattern.finditer(html)
if not matches:
return html
# Reverse regex matches to replace match string with respective JS content
for match in reversed(tuple(matches)):
# JS file name
file_name = match.group(1)
file_path = os.path.join(js_files_path, file_name)
with open(file_path, "r", encoding="utf-8") as f:
file_content = f.read()
# Replace matched string with inline JS
fmt = '<script type="text/javascript">{}</script>'
js_content = fmt.format(file_content)
html = html[:match.start()] + js_content + html[match.end():]
return html
|
python
|
def freeze_js(html):
"""
Freeze all JS assets to the rendered html itself.
"""
matches = js_src_pattern.finditer(html)
if not matches:
return html
# Reverse regex matches to replace match string with respective JS content
for match in reversed(tuple(matches)):
# JS file name
file_name = match.group(1)
file_path = os.path.join(js_files_path, file_name)
with open(file_path, "r", encoding="utf-8") as f:
file_content = f.read()
# Replace matched string with inline JS
fmt = '<script type="text/javascript">{}</script>'
js_content = fmt.format(file_content)
html = html[:match.start()] + js_content + html[match.end():]
return html
|
[
"def",
"freeze_js",
"(",
"html",
")",
":",
"matches",
"=",
"js_src_pattern",
".",
"finditer",
"(",
"html",
")",
"if",
"not",
"matches",
":",
"return",
"html",
"# Reverse regex matches to replace match string with respective JS content",
"for",
"match",
"in",
"reversed",
"(",
"tuple",
"(",
"matches",
")",
")",
":",
"# JS file name",
"file_name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"js_files_path",
",",
"file_name",
")",
"with",
"open",
"(",
"file_path",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"file_content",
"=",
"f",
".",
"read",
"(",
")",
"# Replace matched string with inline JS",
"fmt",
"=",
"'<script type=\"text/javascript\">{}</script>'",
"js_content",
"=",
"fmt",
".",
"format",
"(",
"file_content",
")",
"html",
"=",
"html",
"[",
":",
"match",
".",
"start",
"(",
")",
"]",
"+",
"js_content",
"+",
"html",
"[",
"match",
".",
"end",
"(",
")",
":",
"]",
"return",
"html"
] |
Freeze all JS assets to the rendered html itself.
|
[
"Freeze",
"all",
"JS",
"assets",
"to",
"the",
"rendered",
"html",
"itself",
"."
] |
d894dca1fcc1071c9a52260a9194f8cc3b327905
|
https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/convert.py#L196-L218
|
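freeze_js relies on two module-level names defined elsewhere in csvtotable/convert.py; a minimal sketch of what they plausibly look like (both the regex and the path are assumptions, not taken from the repository):

import os
import re

# Assumed location of the bundled JS assets.
js_files_path = os.path.join(os.path.dirname(__file__), "templates", "js")
# Assumed pattern: group(1) captures the file name referenced by each
# script tag, which freeze_js then joins onto js_files_path.
js_src_pattern = re.compile(r'<script src="(.*?)"></script>')

Iterating over the matches in reverse, as the function does, keeps the start/end offsets of the earlier matches valid while the HTML string is spliced.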
15,223
|
vividvilla/csvtotable
|
csvtotable/cli.py
|
cli
|
def cli(*args, **kwargs):
"""
CSVtoTable commandline utility.
"""
# Convert CSV file
content = convert.convert(kwargs["input_file"], **kwargs)
# Serve the temporary file in browser.
if kwargs["serve"]:
convert.serve(content)
# Write to output file
elif kwargs["output_file"]:
        # Check if the file can be overwritten
if (not kwargs["overwrite"] and
not prompt_overwrite(kwargs["output_file"])):
raise click.Abort()
convert.save(kwargs["output_file"], content)
click.secho("File converted successfully: {}".format(
kwargs["output_file"]), fg="green")
else:
        # If it's not serve mode and the output file is missing then raise an error
raise click.BadOptionUsage("Missing argument \"output_file\".")
|
python
|
def cli(*args, **kwargs):
"""
CSVtoTable commandline utility.
"""
# Convert CSV file
content = convert.convert(kwargs["input_file"], **kwargs)
# Serve the temporary file in browser.
if kwargs["serve"]:
convert.serve(content)
# Write to output file
elif kwargs["output_file"]:
        # Check if the file can be overwritten
if (not kwargs["overwrite"] and
not prompt_overwrite(kwargs["output_file"])):
raise click.Abort()
convert.save(kwargs["output_file"], content)
click.secho("File converted successfully: {}".format(
kwargs["output_file"]), fg="green")
else:
        # If it's not serve mode and the output file is missing then raise an error
raise click.BadOptionUsage("Missing argument \"output_file\".")
|
[
"def",
"cli",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Convert CSV file",
"content",
"=",
"convert",
".",
"convert",
"(",
"kwargs",
"[",
"\"input_file\"",
"]",
",",
"*",
"*",
"kwargs",
")",
"# Serve the temporary file in browser.",
"if",
"kwargs",
"[",
"\"serve\"",
"]",
":",
"convert",
".",
"serve",
"(",
"content",
")",
"# Write to output file",
"elif",
"kwargs",
"[",
"\"output_file\"",
"]",
":",
"# Check if file can be overwrite",
"if",
"(",
"not",
"kwargs",
"[",
"\"overwrite\"",
"]",
"and",
"not",
"prompt_overwrite",
"(",
"kwargs",
"[",
"\"output_file\"",
"]",
")",
")",
":",
"raise",
"click",
".",
"Abort",
"(",
")",
"convert",
".",
"save",
"(",
"kwargs",
"[",
"\"output_file\"",
"]",
",",
"content",
")",
"click",
".",
"secho",
"(",
"\"File converted successfully: {}\"",
".",
"format",
"(",
"kwargs",
"[",
"\"output_file\"",
"]",
")",
",",
"fg",
"=",
"\"green\"",
")",
"else",
":",
"# If its not server and output file is missing then raise error",
"raise",
"click",
".",
"BadOptionUsage",
"(",
"\"Missing argument \\\"output_file\\\".\"",
")"
] |
CSVtoTable commandline utility.
|
[
"CSVtoTable",
"commandline",
"utility",
"."
] |
d894dca1fcc1071c9a52260a9194f8cc3b327905
|
https://github.com/vividvilla/csvtotable/blob/d894dca1fcc1071c9a52260a9194f8cc3b327905/csvtotable/cli.py#L54-L76
|
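A hedged sketch of how a click entry point like cli is typically decorated; the actual argument and option names in csvtotable/cli.py are assumptions here:

import click

@click.command()
@click.argument("input_file", type=click.Path(exists=True))
@click.argument("output_file", required=False)
@click.option("--serve", "-s", is_flag=True, help="Serve the output in a browser.")
@click.option("--overwrite", "-o", is_flag=True, help="Overwrite the output file.")
def cli(**kwargs):
    # click passes the declared parameters as keyword arguments.
    ...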
15,224
|
django-userena-ce/django-userena-ce
|
userena/views.py
|
activate_retry
|
def activate_retry(request, activation_key,
template_name='userena/activate_retry_success.html',
extra_context=None):
"""
Reissue a new ``activation_key`` for the user with the expired
``activation_key``.
    If ``activation_key`` does not exist, if ``USERENA_ACTIVATION_RETRY`` is
    set to False, or for any other error condition, the user is redirected to
    :func:`activate` for error message display.
    :param activation_key:
        A SHA1 string of 40 characters. A SHA1 digest is always 160 bits
        long; at 4 bits per character this makes it 160/4 = 40 characters
        long.
:param template_name:
String containing the template name that is used when new
``activation_key`` has been created. Defaults to
``userena/activate_retry_success.html``.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
if not userena_settings.USERENA_ACTIVATION_RETRY:
return redirect(reverse('userena_activate', args=(activation_key,)))
try:
if UserenaSignup.objects.check_expired_activation(activation_key):
new_key = UserenaSignup.objects.reissue_activation(activation_key)
if new_key:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
except UserenaSignup.DoesNotExist:
return redirect(reverse('userena_activate',args=(activation_key,)))
|
python
|
def activate_retry(request, activation_key,
template_name='userena/activate_retry_success.html',
extra_context=None):
"""
Reissue a new ``activation_key`` for the user with the expired
``activation_key``.
    If ``activation_key`` does not exist, if ``USERENA_ACTIVATION_RETRY`` is
    set to False, or for any other error condition, the user is redirected to
    :func:`activate` for error message display.
    :param activation_key:
        A SHA1 string of 40 characters. A SHA1 digest is always 160 bits
        long; at 4 bits per character this makes it 160/4 = 40 characters
        long.
:param template_name:
String containing the template name that is used when new
``activation_key`` has been created. Defaults to
``userena/activate_retry_success.html``.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
if not userena_settings.USERENA_ACTIVATION_RETRY:
return redirect(reverse('userena_activate', args=(activation_key,)))
try:
if UserenaSignup.objects.check_expired_activation(activation_key):
new_key = UserenaSignup.objects.reissue_activation(activation_key)
if new_key:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
else:
return redirect(reverse('userena_activate',args=(activation_key,)))
except UserenaSignup.DoesNotExist:
return redirect(reverse('userena_activate',args=(activation_key,)))
|
[
"def",
"activate_retry",
"(",
"request",
",",
"activation_key",
",",
"template_name",
"=",
"'userena/activate_retry_success.html'",
",",
"extra_context",
"=",
"None",
")",
":",
"if",
"not",
"userena_settings",
".",
"USERENA_ACTIVATION_RETRY",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_activate'",
",",
"args",
"=",
"(",
"activation_key",
",",
")",
")",
")",
"try",
":",
"if",
"UserenaSignup",
".",
"objects",
".",
"check_expired_activation",
"(",
"activation_key",
")",
":",
"new_key",
"=",
"UserenaSignup",
".",
"objects",
".",
"reissue_activation",
"(",
"activation_key",
")",
"if",
"new_key",
":",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"return",
"ExtraContextTemplateView",
".",
"as_view",
"(",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
")",
"(",
"request",
")",
"else",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_activate'",
",",
"args",
"=",
"(",
"activation_key",
",",
")",
")",
")",
"else",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_activate'",
",",
"args",
"=",
"(",
"activation_key",
",",
")",
")",
")",
"except",
"UserenaSignup",
".",
"DoesNotExist",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_activate'",
",",
"args",
"=",
"(",
"activation_key",
",",
")",
")",
")"
] |
Reissue a new ``activation_key`` for the user with the expired
``activation_key``.
If ``activation_key`` does not exist, if ``USERENA_ACTIVATION_RETRY`` is
set to False, or for any other error condition, the user is redirected to
:func:`activate` for error message display.
:param activation_key:
A SHA1 string of 40 characters. A SHA1 digest is always 160 bits
long; at 4 bits per character this makes it 160/4 = 40 characters
long.
:param template_name:
String containing the template name that is used when new
``activation_key`` has been created. Defaults to
``userena/activate_retry_success.html``.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
|
[
"Reissue",
"a",
"new",
"activation_key",
"for",
"the",
"user",
"with",
"the",
"expired",
"activation_key",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/views.py#L227-L267
|
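The happy path of activate_retry reduces to two manager calls; a condensed sketch, assuming the usual userena import path for UserenaSignup:

from userena.models import UserenaSignup

def reissue_if_expired(activation_key):
    # DoesNotExist propagates for unknown or malformed keys, mirroring
    # the except branch of the view above.
    if UserenaSignup.objects.check_expired_activation(activation_key):
        return UserenaSignup.objects.reissue_activation(activation_key)
    return False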
15,225
|
django-userena-ce/django-userena-ce
|
userena/views.py
|
disabled_account
|
def disabled_account(request, username, template_name, extra_context=None):
"""
    Checks if the account is disabled; if so, returns the disabled account template.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
        The :class:`User` that is currently being viewed.
``profile``
Profile of the viewed user.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if user.is_active:
raise Http404
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
|
python
|
def disabled_account(request, username, template_name, extra_context=None):
"""
    Checks if the account is disabled; if so, returns the disabled account template.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
        The :class:`User` that is currently being viewed.
``profile``
Profile of the viewed user.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if user.is_active:
raise Http404
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = get_user_profile(user=user)
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
|
[
"def",
"disabled_account",
"(",
"request",
",",
"username",
",",
"template_name",
",",
"extra_context",
"=",
"None",
")",
":",
"user",
"=",
"get_object_or_404",
"(",
"get_user_model",
"(",
")",
",",
"username__iexact",
"=",
"username",
")",
"if",
"user",
".",
"is_active",
":",
"raise",
"Http404",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"extra_context",
"[",
"'viewed_user'",
"]",
"=",
"user",
"extra_context",
"[",
"'profile'",
"]",
"=",
"get_user_profile",
"(",
"user",
"=",
"user",
")",
"return",
"ExtraContextTemplateView",
".",
"as_view",
"(",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
")",
"(",
"request",
")"
] |
Checks if the account is disabled; if so, returns the disabled account template.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The :class:`User` that is currently being viewed.
``profile``
Profile of the viewed user.
|
[
"Checks",
"if",
"the",
"account",
"is",
"disabled",
"if",
"so",
"returns",
"the",
"disabled",
"account",
"template",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/views.py#L354-L390
|
15,226
|
django-userena-ce/django-userena-ce
|
userena/views.py
|
profile_list
|
def profile_list(request, page=1, template_name='userena/profile_list.html',
paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"""
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
    If the result is paginated, it will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
"""
warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None))
except (TypeError, ValueError):
page = page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not request.user.is_staff:
raise Http404
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context: extra_context = dict()
return ProfileListView.as_view(queryset=queryset,
paginate_by=paginate_by,
page=page,
template_name=template_name,
extra_context=extra_context,
**kwargs)(request)
|
python
|
def profile_list(request, page=1, template_name='userena/profile_list.html',
paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"""
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
    If the result is paginated, it will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
"""
warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None))
except (TypeError, ValueError):
page = page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not request.user.is_staff:
raise Http404
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context: extra_context = dict()
return ProfileListView.as_view(queryset=queryset,
paginate_by=paginate_by,
page=page,
template_name=template_name,
extra_context=extra_context,
**kwargs)(request)
|
[
"def",
"profile_list",
"(",
"request",
",",
"page",
"=",
"1",
",",
"template_name",
"=",
"'userena/profile_list.html'",
",",
"paginate_by",
"=",
"50",
",",
"extra_context",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"warnings",
".",
"warn",
"(",
"\"views.profile_list is deprecated. Use ProfileListView instead\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"try",
":",
"page",
"=",
"int",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'page'",
",",
"None",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"page",
"=",
"page",
"if",
"userena_settings",
".",
"USERENA_DISABLE_PROFILE_LIST",
"and",
"not",
"request",
".",
"user",
".",
"is_staff",
":",
"raise",
"Http404",
"profile_model",
"=",
"get_profile_model",
"(",
")",
"queryset",
"=",
"profile_model",
".",
"objects",
".",
"get_visible_profiles",
"(",
"request",
".",
"user",
")",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"return",
"ProfileListView",
".",
"as_view",
"(",
"queryset",
"=",
"queryset",
",",
"paginate_by",
"=",
"paginate_by",
",",
"page",
"=",
"page",
",",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
",",
"*",
"*",
"kwargs",
")",
"(",
"request",
")"
] |
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
If the result is paginated, it will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
|
[
"Returns",
"a",
"list",
"of",
"all",
"profiles",
"that",
"are",
"public",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/views.py#L757-L818
|
15,227
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/managers.py
|
MessageContactManager.get_or_create
|
def get_or_create(self, um_from_user, um_to_user, message):
"""
Get or create a Contact
    We override Django's :func:`get_or_create` because we want contacts to
be unique in a bi-directional manner.
"""
created = False
try:
contact = self.get(Q(um_from_user=um_from_user, um_to_user=um_to_user) |
Q(um_from_user=um_to_user, um_to_user=um_from_user))
except self.model.DoesNotExist:
created = True
contact = self.create(um_from_user=um_from_user,
um_to_user=um_to_user,
latest_message=message)
return (contact, created)
|
python
|
def get_or_create(self, um_from_user, um_to_user, message):
"""
Get or create a Contact
    We override Django's :func:`get_or_create` because we want contacts to
be unique in a bi-directional manner.
"""
created = False
try:
contact = self.get(Q(um_from_user=um_from_user, um_to_user=um_to_user) |
Q(um_from_user=um_to_user, um_to_user=um_from_user))
except self.model.DoesNotExist:
created = True
contact = self.create(um_from_user=um_from_user,
um_to_user=um_to_user,
latest_message=message)
return (contact, created)
|
[
"def",
"get_or_create",
"(",
"self",
",",
"um_from_user",
",",
"um_to_user",
",",
"message",
")",
":",
"created",
"=",
"False",
"try",
":",
"contact",
"=",
"self",
".",
"get",
"(",
"Q",
"(",
"um_from_user",
"=",
"um_from_user",
",",
"um_to_user",
"=",
"um_to_user",
")",
"|",
"Q",
"(",
"um_from_user",
"=",
"um_to_user",
",",
"um_to_user",
"=",
"um_from_user",
")",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"created",
"=",
"True",
"contact",
"=",
"self",
".",
"create",
"(",
"um_from_user",
"=",
"um_from_user",
",",
"um_to_user",
"=",
"um_to_user",
",",
"latest_message",
"=",
"message",
")",
"return",
"(",
"contact",
",",
"created",
")"
] |
Get or create a Contact
We override Django's :func:`get_or_create` because we want contacts to
be unique in a bi-directional manner.
|
[
"Get",
"or",
"create",
"a",
"Contact"
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L11-L30
|
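The Q-object union is what makes the contact pair order-insensitive; a minimal sketch of the lookup in isolation:

from django.db.models import Q

def contact_between(queryset, user_a, user_b):
    # Matches the row whether it was stored as (a, b) or (b, a), so a
    # later get_or_create with the users swapped finds the existing row
    # instead of creating a mirror-image duplicate.
    return queryset.get(
        Q(um_from_user=user_a, um_to_user=user_b) |
        Q(um_from_user=user_b, um_to_user=user_a))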
15,228
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/managers.py
|
MessageContactManager.update_contact
|
def update_contact(self, um_from_user, um_to_user, message):
""" Get or update a contacts information """
contact, created = self.get_or_create(um_from_user,
um_to_user,
message)
# If the contact already existed, update the message
if not created:
contact.latest_message = message
contact.save()
return contact
|
python
|
def update_contact(self, um_from_user, um_to_user, message):
""" Get or update a contacts information """
contact, created = self.get_or_create(um_from_user,
um_to_user,
message)
# If the contact already existed, update the message
if not created:
contact.latest_message = message
contact.save()
return contact
|
[
"def",
"update_contact",
"(",
"self",
",",
"um_from_user",
",",
"um_to_user",
",",
"message",
")",
":",
"contact",
",",
"created",
"=",
"self",
".",
"get_or_create",
"(",
"um_from_user",
",",
"um_to_user",
",",
"message",
")",
"# If the contact already existed, update the message",
"if",
"not",
"created",
":",
"contact",
".",
"latest_message",
"=",
"message",
"contact",
".",
"save",
"(",
")",
"return",
"contact"
] |
Get or update a contact's information
|
[
"Get",
"or",
"update",
"a",
"contacts",
"information"
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L32-L42
|
15,229
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/managers.py
|
MessageContactManager.get_contacts_for
|
def get_contacts_for(self, user):
"""
Returns the contacts for this user.
Contacts are other users that this user has received messages
    from or sent messages to.
:param user:
The :class:`User` which to get the contacts for.
"""
contacts = self.filter(Q(um_from_user=user) | Q(um_to_user=user))
return contacts
|
python
|
def get_contacts_for(self, user):
"""
Returns the contacts for this user.
Contacts are other users that this user has received messages
    from or sent messages to.
:param user:
The :class:`User` which to get the contacts for.
"""
contacts = self.filter(Q(um_from_user=user) | Q(um_to_user=user))
return contacts
|
[
"def",
"get_contacts_for",
"(",
"self",
",",
"user",
")",
":",
"contacts",
"=",
"self",
".",
"filter",
"(",
"Q",
"(",
"um_from_user",
"=",
"user",
")",
"|",
"Q",
"(",
"um_to_user",
"=",
"user",
")",
")",
"return",
"contacts"
] |
Returns the contacts for this user.
Contacts are other users that this user has received messages
from or sent messages to.
:param user:
The :class:`User` which to get the contacts for.
|
[
"Returns",
"the",
"contacts",
"for",
"this",
"user",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L44-L56
|
15,230
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/managers.py
|
MessageManager.send_message
|
def send_message(self, sender, um_to_user_list, body):
"""
    Send a message from a user to a user.
    :param sender:
        The :class:`User` which sends the message.
    :param um_to_user_list:
        A list whose elements are the :class:`User` recipients of the message.
    :param body:
String containing the message.
"""
msg = self.model(sender=sender,
body=body)
msg.save()
# Save the recipients
msg.save_recipients(um_to_user_list)
msg.update_contacts(um_to_user_list)
signals.email_sent.send(sender=None,msg=msg)
return msg
|
python
|
def send_message(self, sender, um_to_user_list, body):
"""
    Send a message from a user to a user.
    :param sender:
        The :class:`User` which sends the message.
    :param um_to_user_list:
        A list whose elements are the :class:`User` recipients of the message.
    :param body:
String containing the message.
"""
msg = self.model(sender=sender,
body=body)
msg.save()
# Save the recipients
msg.save_recipients(um_to_user_list)
msg.update_contacts(um_to_user_list)
signals.email_sent.send(sender=None,msg=msg)
return msg
|
[
"def",
"send_message",
"(",
"self",
",",
"sender",
",",
"um_to_user_list",
",",
"body",
")",
":",
"msg",
"=",
"self",
".",
"model",
"(",
"sender",
"=",
"sender",
",",
"body",
"=",
"body",
")",
"msg",
".",
"save",
"(",
")",
"# Save the recipients",
"msg",
".",
"save_recipients",
"(",
"um_to_user_list",
")",
"msg",
".",
"update_contacts",
"(",
"um_to_user_list",
")",
"signals",
".",
"email_sent",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"msg",
"=",
"msg",
")",
"return",
"msg"
] |
Send a message from a user to a user.
:param sender:
The :class:`User` which sends the message.
:param um_to_user_list:
A list whose elements are the :class:`User` recipients of the message.
:param body:
String containing the message.
|
[
"Send",
"a",
"message",
"from",
"a",
"user",
"to",
"a",
"user",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L61-L84
|
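A hedged usage sketch for MessageManager.send_message; the Message import path is assumed from the repository layout:

from django.contrib.auth import get_user_model
from userena.contrib.umessages.models import Message

User = get_user_model()
alice = User.objects.get(username="alice")
bob = User.objects.get(username="bob")
# Recipients are passed as a list even when there is only one of them.
msg = Message.objects.send_message(alice, [bob], "Hello from the sketch")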
15,231
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/managers.py
|
MessageManager.get_conversation_between
|
def get_conversation_between(self, um_from_user, um_to_user):
""" Returns a conversation between two users """
messages = self.filter(Q(sender=um_from_user, recipients=um_to_user,
sender_deleted_at__isnull=True) |
Q(sender=um_to_user, recipients=um_from_user,
messagerecipient__deleted_at__isnull=True))
return messages
|
python
|
def get_conversation_between(self, um_from_user, um_to_user):
""" Returns a conversation between two users """
messages = self.filter(Q(sender=um_from_user, recipients=um_to_user,
sender_deleted_at__isnull=True) |
Q(sender=um_to_user, recipients=um_from_user,
messagerecipient__deleted_at__isnull=True))
return messages
|
[
"def",
"get_conversation_between",
"(",
"self",
",",
"um_from_user",
",",
"um_to_user",
")",
":",
"messages",
"=",
"self",
".",
"filter",
"(",
"Q",
"(",
"sender",
"=",
"um_from_user",
",",
"recipients",
"=",
"um_to_user",
",",
"sender_deleted_at__isnull",
"=",
"True",
")",
"|",
"Q",
"(",
"sender",
"=",
"um_to_user",
",",
"recipients",
"=",
"um_from_user",
",",
"messagerecipient__deleted_at__isnull",
"=",
"True",
")",
")",
"return",
"messages"
] |
Returns a conversation between two users
|
[
"Returns",
"a",
"conversation",
"between",
"two",
"users"
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L86-L92
|
15,232
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/managers.py
|
MessageRecipientManager.count_unread_messages_for
|
def count_unread_messages_for(self, user):
"""
Returns the amount of unread messages for this user
:param user:
A Django :class:`User`
:return:
An integer with the amount of unread messages.
"""
unread_total = self.filter(user=user,
read_at__isnull=True,
deleted_at__isnull=True).count()
return unread_total
|
python
|
def count_unread_messages_for(self, user):
"""
Returns the amount of unread messages for this user
:param user:
A Django :class:`User`
:return:
An integer with the amount of unread messages.
"""
unread_total = self.filter(user=user,
read_at__isnull=True,
deleted_at__isnull=True).count()
return unread_total
|
[
"def",
"count_unread_messages_for",
"(",
"self",
",",
"user",
")",
":",
"unread_total",
"=",
"self",
".",
"filter",
"(",
"user",
"=",
"user",
",",
"read_at__isnull",
"=",
"True",
",",
"deleted_at__isnull",
"=",
"True",
")",
".",
"count",
"(",
")",
"return",
"unread_total"
] |
Returns the amount of unread messages for this user
:param user:
A Django :class:`User`
:return:
An integer with the amount of unread messages.
|
[
"Returns",
"the",
"amount",
"of",
"unread",
"messages",
"for",
"this",
"user"
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L97-L112
|
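The unread predicate shared by the count_unread_messages_* methods, pulled out as a sketch; the MessageRecipient import path is assumed from the repository layout:

from django.contrib.auth import get_user_model
from userena.contrib.umessages.models import MessageRecipient

user = get_user_model().objects.get(username="alice")
# Unread means: delivered to this user, never read, never deleted.
unread = MessageRecipient.objects.filter(
    user=user, read_at__isnull=True, deleted_at__isnull=True).count()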
15,233
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/managers.py
|
MessageRecipientManager.count_unread_messages_between
|
def count_unread_messages_between(self, um_to_user, um_from_user):
"""
Returns the amount of unread messages between two users
:param um_to_user:
        A Django :class:`User` for whom the messages are intended.
    :param um_from_user:
        A Django :class:`User` from whom the messages originate.
:return:
An integer with the amount of unread messages.
"""
unread_total = self.filter(message__sender=um_from_user,
user=um_to_user,
read_at__isnull=True,
deleted_at__isnull=True).count()
return unread_total
|
python
|
def count_unread_messages_between(self, um_to_user, um_from_user):
"""
Returns the amount of unread messages between two users
:param um_to_user:
        A Django :class:`User` for whom the messages are intended.
    :param um_from_user:
        A Django :class:`User` from whom the messages originate.
:return:
An integer with the amount of unread messages.
"""
unread_total = self.filter(message__sender=um_from_user,
user=um_to_user,
read_at__isnull=True,
deleted_at__isnull=True).count()
return unread_total
|
[
"def",
"count_unread_messages_between",
"(",
"self",
",",
"um_to_user",
",",
"um_from_user",
")",
":",
"unread_total",
"=",
"self",
".",
"filter",
"(",
"message__sender",
"=",
"um_from_user",
",",
"user",
"=",
"um_to_user",
",",
"read_at__isnull",
"=",
"True",
",",
"deleted_at__isnull",
"=",
"True",
")",
".",
"count",
"(",
")",
"return",
"unread_total"
] |
Returns the amount of unread messages between two users
:param um_to_user:
A Django :class:`User` for whom the messages are intended.
:param um_from_user:
A Django :class:`User` from whom the messages originate.
:return:
An integer with the amount of unread messages.
|
[
"Returns",
"the",
"amount",
"of",
"unread",
"messages",
"between",
"two",
"users"
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/managers.py#L114-L133
|
15,234
|
django-userena-ce/django-userena-ce
|
userena/managers.py
|
UserenaManager.reissue_activation
|
def reissue_activation(self, activation_key):
"""
Creates a new ``activation_key`` resetting activation timeframe when
users let the previous key expire.
:param activation_key:
String containing the secret SHA1 activation key.
"""
try:
userena = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
try:
salt, new_activation_key = generate_sha1(userena.user.username)
userena.activation_key = new_activation_key
userena.save(using=self._db)
userena.user.date_joined = get_datetime_now()
userena.user.save(using=self._db)
userena.send_activation_email()
return True
except Exception:
return False
|
python
|
def reissue_activation(self, activation_key):
"""
Creates a new ``activation_key`` resetting activation timeframe when
users let the previous key expire.
:param activation_key:
String containing the secret SHA1 activation key.
"""
try:
userena = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
try:
salt, new_activation_key = generate_sha1(userena.user.username)
userena.activation_key = new_activation_key
userena.save(using=self._db)
userena.user.date_joined = get_datetime_now()
userena.user.save(using=self._db)
userena.send_activation_email()
return True
except Exception:
return False
|
[
"def",
"reissue_activation",
"(",
"self",
",",
"activation_key",
")",
":",
"try",
":",
"userena",
"=",
"self",
".",
"get",
"(",
"activation_key",
"=",
"activation_key",
")",
"except",
"self",
".",
"model",
".",
"DoesNotExist",
":",
"return",
"False",
"try",
":",
"salt",
",",
"new_activation_key",
"=",
"generate_sha1",
"(",
"userena",
".",
"user",
".",
"username",
")",
"userena",
".",
"activation_key",
"=",
"new_activation_key",
"userena",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"userena",
".",
"user",
".",
"date_joined",
"=",
"get_datetime_now",
"(",
")",
"userena",
".",
"user",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"userena",
".",
"send_activation_email",
"(",
")",
"return",
"True",
"except",
"Exception",
":",
"return",
"False"
] |
Creates a new ``activation_key`` resetting activation timeframe when
users let the previous key expire.
:param activation_key:
String containing the secret SHA1 activation key.
|
[
"Creates",
"a",
"new",
"activation_key",
"resetting",
"activation",
"timeframe",
"when",
"users",
"let",
"the",
"previous",
"key",
"expire",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/managers.py#L106-L128
|
15,235
|
django-userena-ce/django-userena-ce
|
userena/managers.py
|
UserenaManager.check_expired_activation
|
def check_expired_activation(self, activation_key):
"""
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
        True if the key has expired, False if still valid.
"""
if SHA1_RE.search(activation_key):
userena = self.get(activation_key=activation_key)
return userena.activation_key_expired()
raise self.model.DoesNotExist
|
python
|
def check_expired_activation(self, activation_key):
"""
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
        True if the key has expired, False if still valid.
"""
if SHA1_RE.search(activation_key):
userena = self.get(activation_key=activation_key)
return userena.activation_key_expired()
raise self.model.DoesNotExist
|
[
"def",
"check_expired_activation",
"(",
"self",
",",
"activation_key",
")",
":",
"if",
"SHA1_RE",
".",
"search",
"(",
"activation_key",
")",
":",
"userena",
"=",
"self",
".",
"get",
"(",
"activation_key",
"=",
"activation_key",
")",
"return",
"userena",
".",
"activation_key_expired",
"(",
")",
"raise",
"self",
".",
"model",
".",
"DoesNotExist"
] |
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
True if the key has expired, False if still valid.
|
[
"Check",
"if",
"activation_key",
"is",
"still",
"valid",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/managers.py#L163-L180
|
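SHA1_RE is defined elsewhere in userena/managers.py; the conventional definition it is assumed to have:

import re

# Assumed to match a lowercase 40-character hexadecimal SHA1 digest.
SHA1_RE = re.compile(r'^[a-f0-9]{40}$')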
15,236
|
django-userena-ce/django-userena-ce
|
userena/managers.py
|
UserenaManager.check_permissions
|
def check_permissions(self):
"""
Checks that all permissions are set correctly for the users.
    :return: A tuple of ``(changed_permissions, changed_users, warnings)`` for the users whose permissions were wrong.
"""
# Variable to supply some feedback
changed_permissions = []
changed_users = []
warnings = []
# Check that all the permissions are available.
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
model_obj = get_profile_model()
else: model_obj = get_user_model()
model_content_type = ContentType.objects.get_for_model(model_obj)
for perm in perms:
try:
Permission.objects.get(codename=perm[0],
content_type=model_content_type)
except Permission.DoesNotExist:
changed_permissions.append(perm[1])
Permission.objects.create(name=perm[1],
codename=perm[0],
content_type=model_content_type)
# it is safe to rely on settings.ANONYMOUS_USER_NAME since it is a
# requirement of django-guardian
for user in get_user_model().objects.exclude(username=settings.ANONYMOUS_USER_NAME):
try:
user_profile = get_user_profile(user=user)
except ObjectDoesNotExist:
warnings.append(_("No profile found for %(username)s") \
% {'username': user.username})
else:
all_permissions = get_perms(user, user_profile) + get_perms(user, user)
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
perm_object = get_user_profile(user=user)
else: perm_object = user
for perm in perms:
if perm[0] not in all_permissions:
assign_perm(perm[0], user, perm_object)
changed_users.append(user)
return (changed_permissions, changed_users, warnings)
|
python
|
def check_permissions(self):
"""
Checks that all permissions are set correctly for the users.
    :return: A tuple of ``(changed_permissions, changed_users, warnings)`` for the users whose permissions were wrong.
"""
# Variable to supply some feedback
changed_permissions = []
changed_users = []
warnings = []
# Check that all the permissions are available.
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
model_obj = get_profile_model()
else: model_obj = get_user_model()
model_content_type = ContentType.objects.get_for_model(model_obj)
for perm in perms:
try:
Permission.objects.get(codename=perm[0],
content_type=model_content_type)
except Permission.DoesNotExist:
changed_permissions.append(perm[1])
Permission.objects.create(name=perm[1],
codename=perm[0],
content_type=model_content_type)
# it is safe to rely on settings.ANONYMOUS_USER_NAME since it is a
# requirement of django-guardian
for user in get_user_model().objects.exclude(username=settings.ANONYMOUS_USER_NAME):
try:
user_profile = get_user_profile(user=user)
except ObjectDoesNotExist:
warnings.append(_("No profile found for %(username)s") \
% {'username': user.username})
else:
all_permissions = get_perms(user, user_profile) + get_perms(user, user)
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
perm_object = get_user_profile(user=user)
else: perm_object = user
for perm in perms:
if perm[0] not in all_permissions:
assign_perm(perm[0], user, perm_object)
changed_users.append(user)
return (changed_permissions, changed_users, warnings)
|
[
"def",
"check_permissions",
"(",
"self",
")",
":",
"# Variable to supply some feedback",
"changed_permissions",
"=",
"[",
"]",
"changed_users",
"=",
"[",
"]",
"warnings",
"=",
"[",
"]",
"# Check that all the permissions are available.",
"for",
"model",
",",
"perms",
"in",
"ASSIGNED_PERMISSIONS",
".",
"items",
"(",
")",
":",
"if",
"model",
"==",
"'profile'",
":",
"model_obj",
"=",
"get_profile_model",
"(",
")",
"else",
":",
"model_obj",
"=",
"get_user_model",
"(",
")",
"model_content_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"model_obj",
")",
"for",
"perm",
"in",
"perms",
":",
"try",
":",
"Permission",
".",
"objects",
".",
"get",
"(",
"codename",
"=",
"perm",
"[",
"0",
"]",
",",
"content_type",
"=",
"model_content_type",
")",
"except",
"Permission",
".",
"DoesNotExist",
":",
"changed_permissions",
".",
"append",
"(",
"perm",
"[",
"1",
"]",
")",
"Permission",
".",
"objects",
".",
"create",
"(",
"name",
"=",
"perm",
"[",
"1",
"]",
",",
"codename",
"=",
"perm",
"[",
"0",
"]",
",",
"content_type",
"=",
"model_content_type",
")",
"# it is safe to rely on settings.ANONYMOUS_USER_NAME since it is a",
"# requirement of django-guardian",
"for",
"user",
"in",
"get_user_model",
"(",
")",
".",
"objects",
".",
"exclude",
"(",
"username",
"=",
"settings",
".",
"ANONYMOUS_USER_NAME",
")",
":",
"try",
":",
"user_profile",
"=",
"get_user_profile",
"(",
"user",
"=",
"user",
")",
"except",
"ObjectDoesNotExist",
":",
"warnings",
".",
"append",
"(",
"_",
"(",
"\"No profile found for %(username)s\"",
")",
"%",
"{",
"'username'",
":",
"user",
".",
"username",
"}",
")",
"else",
":",
"all_permissions",
"=",
"get_perms",
"(",
"user",
",",
"user_profile",
")",
"+",
"get_perms",
"(",
"user",
",",
"user",
")",
"for",
"model",
",",
"perms",
"in",
"ASSIGNED_PERMISSIONS",
".",
"items",
"(",
")",
":",
"if",
"model",
"==",
"'profile'",
":",
"perm_object",
"=",
"get_user_profile",
"(",
"user",
"=",
"user",
")",
"else",
":",
"perm_object",
"=",
"user",
"for",
"perm",
"in",
"perms",
":",
"if",
"perm",
"[",
"0",
"]",
"not",
"in",
"all_permissions",
":",
"assign_perm",
"(",
"perm",
"[",
"0",
"]",
",",
"user",
",",
"perm_object",
")",
"changed_users",
".",
"append",
"(",
"user",
")",
"return",
"(",
"changed_permissions",
",",
"changed_users",
",",
"warnings",
")"
] |
Checks that all permissions are set correctly for the users.
:return: A tuple of ``(changed_permissions, changed_users, warnings)`` for the users whose permissions were wrong.
|
[
"Checks",
"that",
"all",
"permissions",
"are",
"set",
"correctly",
"for",
"the",
"users",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/managers.py#L236-L287
|
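check_permissions is defined on UserenaManager, so it is reached through the default manager of UserenaSignup; a hedged invocation sketch:

from userena.models import UserenaSignup

changed_permissions, changed_users, warnings = (
    UserenaSignup.objects.check_permissions())
for warning in warnings:
    print(warning)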
15,237
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/templatetags/umessages_tags.py
|
get_unread_message_count_for
|
def get_unread_message_count_for(parser, token):
"""
Returns the unread message count for a user.
Syntax::
{% get_unread_message_count_for [user] as [var_name] %}
Example usage::
{% get_unread_message_count_for pero as message_count %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
user, var_name = m.groups()
return MessageCount(user, var_name)
|
python
|
def get_unread_message_count_for(parser, token):
"""
Returns the unread message count for a user.
Syntax::
{% get_unread_message_count_for [user] as [var_name] %}
Example usage::
{% get_unread_message_count_for pero as message_count %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
user, var_name = m.groups()
return MessageCount(user, var_name)
|
[
"def",
"get_unread_message_count_for",
"(",
"parser",
",",
"token",
")",
":",
"try",
":",
"tag_name",
",",
"arg",
"=",
"token",
".",
"contents",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"%s tag requires arguments\"",
"%",
"token",
".",
"contents",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"m",
"=",
"re",
".",
"search",
"(",
"r'(.*?) as (\\w+)'",
",",
"arg",
")",
"if",
"not",
"m",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"%s tag had invalid arguments\"",
"%",
"tag_name",
")",
"user",
",",
"var_name",
"=",
"m",
".",
"groups",
"(",
")",
"return",
"MessageCount",
"(",
"user",
",",
"var_name",
")"
] |
Returns the unread message count for a user.
Syntax::
{% get_unread_message_count_for [user] as [var_name] %}
Example usage::
{% get_unread_message_count_for pero as message_count %}
|
[
"Returns",
"the",
"unread",
"message",
"count",
"for",
"a",
"user",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/templatetags/umessages_tags.py#L40-L61
|
15,238
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/templatetags/umessages_tags.py
|
get_unread_message_count_between
|
def get_unread_message_count_between(parser, token):
"""
Returns the unread message count between two users.
Syntax::
{% get_unread_message_count_between [user] and [user] as [var_name] %}
Example usage::
{% get_unread_message_count_between funky and wunki as message_count %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) and (.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
um_from_user, um_to_user, var_name = m.groups()
return MessageCount(um_from_user, var_name, um_to_user)
|
python
|
def get_unread_message_count_between(parser, token):
"""
Returns the unread message count between two users.
Syntax::
{% get_unread_message_count_between [user] and [user] as [var_name] %}
Example usage::
{% get_unread_message_count_between funky and wunki as message_count %}
"""
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires arguments" % token.contents.split()[0])
m = re.search(r'(.*?) and (.*?) as (\w+)', arg)
if not m:
raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
um_from_user, um_to_user, var_name = m.groups()
return MessageCount(um_from_user, var_name, um_to_user)
|
[
"def",
"get_unread_message_count_between",
"(",
"parser",
",",
"token",
")",
":",
"try",
":",
"tag_name",
",",
"arg",
"=",
"token",
".",
"contents",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"%s tag requires arguments\"",
"%",
"token",
".",
"contents",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"m",
"=",
"re",
".",
"search",
"(",
"r'(.*?) and (.*?) as (\\w+)'",
",",
"arg",
")",
"if",
"not",
"m",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"%s tag had invalid arguments\"",
"%",
"tag_name",
")",
"um_from_user",
",",
"um_to_user",
",",
"var_name",
"=",
"m",
".",
"groups",
"(",
")",
"return",
"MessageCount",
"(",
"um_from_user",
",",
"var_name",
",",
"um_to_user",
")"
] |
Returns the unread message count between two users.
Syntax::
{% get_unread_message_count_between [user] and [user] as [var_name] %}
Example usage::
{% get_unread_message_count_between funky and wunki as message_count %}
|
[
"Returns",
"the",
"unread",
"message",
"count",
"between",
"two",
"users",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/templatetags/umessages_tags.py#L64-L85
|
15,239
|
django-userena-ce/django-userena-ce
|
userena/models.py
|
upload_to_mugshot
|
def upload_to_mugshot(instance, filename):
"""
    Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH`` and saves it
    under a unique hash for the image. This is for privacy reasons so others
can't just browse through the mugshot directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.pk)
path = userena_settings.USERENA_MUGSHOT_PATH % {'username': instance.user.username,
'id': instance.user.id,
'date': instance.user.date_joined,
'date_now': get_datetime_now().date()}
return '%(path)s%(hash)s.%(extension)s' % {'path': path,
'hash': hash[:10],
'extension': extension}
|
python
|
def upload_to_mugshot(instance, filename):
"""
    Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH`` and saves it
    under a unique hash for the image. This is for privacy reasons so others
can't just browse through the mugshot directory.
"""
extension = filename.split('.')[-1].lower()
salt, hash = generate_sha1(instance.pk)
path = userena_settings.USERENA_MUGSHOT_PATH % {'username': instance.user.username,
'id': instance.user.id,
'date': instance.user.date_joined,
'date_now': get_datetime_now().date()}
return '%(path)s%(hash)s.%(extension)s' % {'path': path,
'hash': hash[:10],
'extension': extension}
|
[
"def",
"upload_to_mugshot",
"(",
"instance",
",",
"filename",
")",
":",
"extension",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
".",
"lower",
"(",
")",
"salt",
",",
"hash",
"=",
"generate_sha1",
"(",
"instance",
".",
"pk",
")",
"path",
"=",
"userena_settings",
".",
"USERENA_MUGSHOT_PATH",
"%",
"{",
"'username'",
":",
"instance",
".",
"user",
".",
"username",
",",
"'id'",
":",
"instance",
".",
"user",
".",
"id",
",",
"'date'",
":",
"instance",
".",
"user",
".",
"date_joined",
",",
"'date_now'",
":",
"get_datetime_now",
"(",
")",
".",
"date",
"(",
")",
"}",
"return",
"'%(path)s%(hash)s.%(extension)s'",
"%",
"{",
"'path'",
":",
"path",
",",
"'hash'",
":",
"hash",
"[",
":",
"10",
"]",
",",
"'extension'",
":",
"extension",
"}"
] |
Uploads a mugshot for a user to the ``USERENA_MUGSHOT_PATH`` and saves it
under a unique hash for the image. This is for privacy reasons so others
can't just browse through the mugshot directory.
|
[
"Uploads",
"a",
"mugshot",
"for",
"a",
"user",
"to",
"the",
"USERENA_MUGSHOT_PATH",
"and",
"saving",
"it",
"under",
"unique",
"hash",
"for",
"the",
"image",
".",
"This",
"is",
"for",
"privacy",
"reasons",
"so",
"others",
"can",
"t",
"just",
"browse",
"through",
"the",
"mugshot",
"directory",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/models.py#L24-L39
|
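An illustrative expansion of the %-style path template used above; the value of USERENA_MUGSHOT_PATH is an assumption, not the userena default:

USERENA_MUGSHOT_PATH = 'mugshots/%(username)s/'
# Extra keys in the mapping are simply ignored by %-formatting.
path = USERENA_MUGSHOT_PATH % {'username': 'alice', 'id': 1,
                               'date': '2019-01-01', 'date_now': '2019-06-01'}
# path == 'mugshots/alice/'; upload_to_mugshot then appends the first
# ten characters of the hash plus the lowercased file extension.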
15,240
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/views.py
|
message_compose
|
def message_compose(request, recipients=None, compose_form=ComposeForm,
success_url=None, template_name="umessages/message_form.html",
recipient_filter=None, extra_context=None):
"""
Compose a new message
    :param recipients:
        String containing the usernames to whom the message is sent. Can be
        multiple usernames separated by a ``+`` sign.
    :param compose_form:
        The form that is used for getting necessary information. Defaults to
        :class:`ComposeForm`.
    :param success_url:
        String containing the named URL to redirect to after successfully
        sending a message. Defaults to ``userena_umessages_list`` if there are
        multiple recipients. If there is only one recipient, redirects to the
        ``userena_umessages_detail`` page, showing the conversation.
    :param template_name:
        String containing the name of the template that is used.
    :param recipient_filter:
        A list of :class:`User` that don't want to receive any messages.
:param extra_context:
Dictionary with extra variables supplied to the template.
**Context**
``form``
The form that is used.
"""
initial_data = dict()
if recipients:
username_list = [r.strip() for r in recipients.split("+")]
recipients = [u for u in get_user_model().objects.filter(username__in=username_list)]
initial_data["to"] = recipients
form = compose_form(initial=initial_data)
if request.method == "POST":
form = compose_form(request.POST)
if form.is_valid():
requested_redirect = request.GET.get(REDIRECT_FIELD_NAME,
request.POST.get(REDIRECT_FIELD_NAME, False))
message = form.save(request.user)
recipients = form.cleaned_data['to']
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Message is sent.'),
fail_silently=True)
# Redirect mechanism
redirect_to = reverse('userena_umessages_list')
if requested_redirect: redirect_to = requested_redirect
elif success_url: redirect_to = success_url
elif len(recipients) == 1:
redirect_to = reverse('userena_umessages_detail',
kwargs={'username': recipients[0].username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context["form"] = form
extra_context["recipients"] = recipients
return render(request, template_name, extra_context)
|
python
|
def message_compose(request, recipients=None, compose_form=ComposeForm,
success_url=None, template_name="umessages/message_form.html",
recipient_filter=None, extra_context=None):
"""
Compose a new message
    :param recipients:
        String containing the usernames to whom the message is sent. Can be
        multiple usernames separated by a ``+`` sign.
    :param compose_form:
        The form that is used for getting necessary information. Defaults to
        :class:`ComposeForm`.
    :param success_url:
        String containing the named URL to redirect to after successfully
        sending a message. Defaults to ``userena_umessages_list`` if there are
        multiple recipients. If there is only one recipient, redirects to the
        ``userena_umessages_detail`` page, showing the conversation.
    :param template_name:
        String containing the name of the template that is used.
    :param recipient_filter:
        A list of :class:`User` that don't want to receive any messages.
:param extra_context:
Dictionary with extra variables supplied to the template.
**Context**
``form``
The form that is used.
"""
initial_data = dict()
if recipients:
username_list = [r.strip() for r in recipients.split("+")]
recipients = [u for u in get_user_model().objects.filter(username__in=username_list)]
initial_data["to"] = recipients
form = compose_form(initial=initial_data)
if request.method == "POST":
form = compose_form(request.POST)
if form.is_valid():
requested_redirect = request.GET.get(REDIRECT_FIELD_NAME,
request.POST.get(REDIRECT_FIELD_NAME, False))
message = form.save(request.user)
recipients = form.cleaned_data['to']
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Message is sent.'),
fail_silently=True)
# Redirect mechanism
redirect_to = reverse('userena_umessages_list')
if requested_redirect: redirect_to = requested_redirect
elif success_url: redirect_to = success_url
elif len(recipients) == 1:
redirect_to = reverse('userena_umessages_detail',
kwargs={'username': recipients[0].username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context["form"] = form
extra_context["recipients"] = recipients
return render(request, template_name, extra_context)
|
[
"def",
"message_compose",
"(",
"request",
",",
"recipients",
"=",
"None",
",",
"compose_form",
"=",
"ComposeForm",
",",
"success_url",
"=",
"None",
",",
"template_name",
"=",
"\"umessages/message_form.html\"",
",",
"recipient_filter",
"=",
"None",
",",
"extra_context",
"=",
"None",
")",
":",
"initial_data",
"=",
"dict",
"(",
")",
"if",
"recipients",
":",
"username_list",
"=",
"[",
"r",
".",
"strip",
"(",
")",
"for",
"r",
"in",
"recipients",
".",
"split",
"(",
"\"+\"",
")",
"]",
"recipients",
"=",
"[",
"u",
"for",
"u",
"in",
"get_user_model",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"username__in",
"=",
"username_list",
")",
"]",
"initial_data",
"[",
"\"to\"",
"]",
"=",
"recipients",
"form",
"=",
"compose_form",
"(",
"initial",
"=",
"initial_data",
")",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"form",
"=",
"compose_form",
"(",
"request",
".",
"POST",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"requested_redirect",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"request",
".",
"POST",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"False",
")",
")",
"message",
"=",
"form",
".",
"save",
"(",
"request",
".",
"user",
")",
"recipients",
"=",
"form",
".",
"cleaned_data",
"[",
"'to'",
"]",
"if",
"userena_settings",
".",
"USERENA_USE_MESSAGES",
":",
"messages",
".",
"success",
"(",
"request",
",",
"_",
"(",
"'Message is sent.'",
")",
",",
"fail_silently",
"=",
"True",
")",
"# Redirect mechanism",
"redirect_to",
"=",
"reverse",
"(",
"'userena_umessages_list'",
")",
"if",
"requested_redirect",
":",
"redirect_to",
"=",
"requested_redirect",
"elif",
"success_url",
":",
"redirect_to",
"=",
"success_url",
"elif",
"len",
"(",
"recipients",
")",
"==",
"1",
":",
"redirect_to",
"=",
"reverse",
"(",
"'userena_umessages_detail'",
",",
"kwargs",
"=",
"{",
"'username'",
":",
"recipients",
"[",
"0",
"]",
".",
"username",
"}",
")",
"return",
"redirect",
"(",
"redirect_to",
")",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"extra_context",
"[",
"\"form\"",
"]",
"=",
"form",
"extra_context",
"[",
"\"recipients\"",
"]",
"=",
"recipients",
"return",
"render",
"(",
"request",
",",
"template_name",
",",
"extra_context",
")"
] |
Compose a new message
:param recipients:
String containing the usernames to whom the message is sent. Can be
multiple usernames separated by a ``+`` sign.
:param compose_form:
The form that is used for getting the necessary information. Defaults to
:class:`ComposeForm`.
:param success_url:
String containing the named URL to redirect to after successfully
sending a message. Defaults to ``userena_umessages_list`` if there are
multiple recipients. If there is only one recipient, redirects to the
``userena_umessages_detail`` page, showing the conversation.
:param template_name:
String containing the name of the template that is used.
:param recipient_filter:
A list of :class:`User` objects that don't want to receive any messages.
:param extra_context:
Dictionary with extra variables supplied to the template.
**Context**
``form``
The form that is used.
|
[
"Compose",
"a",
"new",
"message"
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/views.py#L73-L141
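Usage sketch (editorial addition, not part of the dataset row): one plausible way to wire the view above into a Django URLconf. The route pattern and name are illustrative assumptions, not the package's shipped URLs.

from django.urls import re_path
from userena.contrib.umessages import views

urlpatterns = [
    # "alice+bob" in the URL arrives as the view's `recipients` string.
    re_path(r'^messages/compose/(?P<recipients>[\w.@+-]+)/$',
            views.message_compose, name='umessages_compose_to'),
]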
|
15,241
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/views.py
|
message_remove
|
def message_remove(request, undo=False):
"""
A ``POST`` to remove messages.
:param undo:
A Boolean that, if ``True``, restores previously removed messages.
POST can have the following keys:
``message_pks``
List of message IDs that should be deleted.
``next``
String containing the URI which to redirect to after the keys are
removed. Redirect defaults to the inbox view.
The ``next`` value can also be supplied in the URI with ``?next=<value>``.
"""
message_pks = request.POST.getlist('message_pks')
redirect_to = request.GET.get(REDIRECT_FIELD_NAME,
request.POST.get(REDIRECT_FIELD_NAME, False))
if message_pks:
# Check that all values are integers.
valid_message_pk_list = set()
for pk in message_pks:
try: valid_pk = int(pk)
except (TypeError, ValueError): pass
else:
valid_message_pk_list.add(valid_pk)
# Delete all the messages, if they belong to the user.
now = get_datetime_now()
changed_message_list = set()
for pk in valid_message_pk_list:
message = get_object_or_404(Message, pk=pk)
# Check if the user is the owner
if message.sender == request.user:
if undo:
message.sender_deleted_at = None
else:
message.sender_deleted_at = now
message.save()
changed_message_list.add(message.pk)
# Check if the user is a recipient of the message
if request.user in message.recipients.all():
mr = message.messagerecipient_set.get(user=request.user,
message=message)
if undo:
mr.deleted_at = None
else:
mr.deleted_at = now
mr.save()
changed_message_list.add(message.pk)
# Send messages
if (len(changed_message_list) > 0) and userena_settings.USERENA_USE_MESSAGES:
if undo:
message = ungettext('Message is successfully restored.',
'Messages are successfully restored.',
len(changed_message_list))
else:
message = ungettext('Message is successfully removed.',
'Messages are successfully removed.',
len(changed_message_list))
messages.success(request, message, fail_silently=True)
if redirect_to: return redirect(redirect_to)
else: return redirect(reverse('userena_umessages_list'))
|
python
|
def message_remove(request, undo=False):
"""
A ``POST`` to remove messages.
:param undo:
A Boolean that, if ``True``, restores previously removed messages.
POST can have the following keys:
``message_pks``
List of message IDs that should be deleted.
``next``
String containing the URI which to redirect to after the keys are
removed. Redirect defaults to the inbox view.
The ``next`` value can also be supplied in the URI with ``?next=<value>``.
"""
message_pks = request.POST.getlist('message_pks')
redirect_to = request.GET.get(REDIRECT_FIELD_NAME,
request.POST.get(REDIRECT_FIELD_NAME, False))
if message_pks:
# Check that all values are integers.
valid_message_pk_list = set()
for pk in message_pks:
try: valid_pk = int(pk)
except (TypeError, ValueError): pass
else:
valid_message_pk_list.add(valid_pk)
# Delete all the messages, if they belong to the user.
now = get_datetime_now()
changed_message_list = set()
for pk in valid_message_pk_list:
message = get_object_or_404(Message, pk=pk)
# Check if the user is the owner
if message.sender == request.user:
if undo:
message.sender_deleted_at = None
else:
message.sender_deleted_at = now
message.save()
changed_message_list.add(message.pk)
# Check if the user is a recipient of the message
if request.user in message.recipients.all():
mr = message.messagerecipient_set.get(user=request.user,
message=message)
if undo:
mr.deleted_at = None
else:
mr.deleted_at = now
mr.save()
changed_message_list.add(message.pk)
# Send messages
if (len(changed_message_list) > 0) and userena_settings.USERENA_USE_MESSAGES:
if undo:
message = ungettext('Message is successfully restored.',
'Messages are successfully restored.',
len(changed_message_list))
else:
message = ungettext('Message is successfully removed.',
'Messages are successfully removed.',
len(changed_message_list))
messages.success(request, message, fail_silently=True)
if redirect_to: return redirect(redirect_to)
else: return redirect(reverse('userena_umessages_list'))
|
[
"def",
"message_remove",
"(",
"request",
",",
"undo",
"=",
"False",
")",
":",
"message_pks",
"=",
"request",
".",
"POST",
".",
"getlist",
"(",
"'message_pks'",
")",
"redirect_to",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"request",
".",
"POST",
".",
"get",
"(",
"REDIRECT_FIELD_NAME",
",",
"False",
")",
")",
"if",
"message_pks",
":",
"# Check that all values are integers.",
"valid_message_pk_list",
"=",
"set",
"(",
")",
"for",
"pk",
"in",
"message_pks",
":",
"try",
":",
"valid_pk",
"=",
"int",
"(",
"pk",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"pass",
"else",
":",
"valid_message_pk_list",
".",
"add",
"(",
"valid_pk",
")",
"# Delete all the messages, if they belong to the user.",
"now",
"=",
"get_datetime_now",
"(",
")",
"changed_message_list",
"=",
"set",
"(",
")",
"for",
"pk",
"in",
"valid_message_pk_list",
":",
"message",
"=",
"get_object_or_404",
"(",
"Message",
",",
"pk",
"=",
"pk",
")",
"# Check if the user is the owner",
"if",
"message",
".",
"sender",
"==",
"request",
".",
"user",
":",
"if",
"undo",
":",
"message",
".",
"sender_deleted_at",
"=",
"None",
"else",
":",
"message",
".",
"sender_deleted_at",
"=",
"now",
"message",
".",
"save",
"(",
")",
"changed_message_list",
".",
"add",
"(",
"message",
".",
"pk",
")",
"# Check if the user is a recipient of the message",
"if",
"request",
".",
"user",
"in",
"message",
".",
"recipients",
".",
"all",
"(",
")",
":",
"mr",
"=",
"message",
".",
"messagerecipient_set",
".",
"get",
"(",
"user",
"=",
"request",
".",
"user",
",",
"message",
"=",
"message",
")",
"if",
"undo",
":",
"mr",
".",
"deleted_at",
"=",
"None",
"else",
":",
"mr",
".",
"deleted_at",
"=",
"now",
"mr",
".",
"save",
"(",
")",
"changed_message_list",
".",
"add",
"(",
"message",
".",
"pk",
")",
"# Send messages",
"if",
"(",
"len",
"(",
"changed_message_list",
")",
">",
"0",
")",
"and",
"userena_settings",
".",
"USERENA_USE_MESSAGES",
":",
"if",
"undo",
":",
"message",
"=",
"ungettext",
"(",
"'Message is succesfully restored.'",
",",
"'Messages are succesfully restored.'",
",",
"len",
"(",
"changed_message_list",
")",
")",
"else",
":",
"message",
"=",
"ungettext",
"(",
"'Message is successfully removed.'",
",",
"'Messages are successfully removed.'",
",",
"len",
"(",
"changed_message_list",
")",
")",
"messages",
".",
"success",
"(",
"request",
",",
"message",
",",
"fail_silently",
"=",
"True",
")",
"if",
"redirect_to",
":",
"return",
"redirect",
"(",
"redirect_to",
")",
"else",
":",
"return",
"redirect",
"(",
"reverse",
"(",
"'userena_umessages_list'",
")",
")"
] |
A ``POST`` to remove messages.
:param undo:
A Boolean that, if ``True``, restores previously removed messages.
POST can have the following keys:
``message_pks``
List of message IDs that should be deleted.
``next``
String containing the URI which to redirect to after the keys are
removed. Redirect defaults to the inbox view.
The ``next`` value can also be supplied in the URI with ``?next=<value>``.
|
[
"A",
"POST",
"to",
"remove",
"messages",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/views.py#L145-L217
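Illustrative sketch (editorial addition): driving the view above with Django's test client. The URL paths and credentials are assumptions; the view itself only reads ``message_pks`` and the redirect field from the POST data.

from django.test import Client

c = Client()
c.login(username='alice', password='secret')  # assumed existing account
# Remove two messages, then return to the inbox:
c.post('/messages/remove/', {'message_pks': ['1', '2'], 'next': '/messages/'})
# A route that calls the view with undo=True would restore them:
c.post('/messages/unremove/', {'message_pks': ['1', '2']})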
|
15,242
|
django-userena-ce/django-userena-ce
|
demo/profiles/forms.py
|
SignupFormExtra.save
|
def save(self):
"""
Override the save method to save the first and last name to the user
field.
"""
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.save()
# Userena expects to get the new user from this form, so return the new
# user.
return new_user
|
python
|
def save(self):
"""
Override the save method to save the first and last name to the user
field.
"""
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.save()
# Userena expects to get the new user from this form, so return the new
# user.
return new_user
|
[
"def",
"save",
"(",
"self",
")",
":",
"# First save the parent form and get the user.",
"new_user",
"=",
"super",
"(",
"SignupFormExtra",
",",
"self",
")",
".",
"save",
"(",
")",
"new_user",
".",
"first_name",
"=",
"self",
".",
"cleaned_data",
"[",
"'first_name'",
"]",
"new_user",
".",
"last_name",
"=",
"self",
".",
"cleaned_data",
"[",
"'last_name'",
"]",
"new_user",
".",
"save",
"(",
")",
"# Userena expects to get the new user from this form, so return the new",
"# user.",
"return",
"new_user"
] |
Override the save method to save the first and last name to the user
field.
|
[
"Override",
"the",
"save",
"method",
"to",
"save",
"the",
"first",
"and",
"last",
"name",
"to",
"the",
"user",
"field",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/demo/profiles/forms.py#L38-L53
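Quick usage sketch (editorial addition; the password field names are assumed to come from userena's parent SignupForm):

from demo.profiles.forms import SignupFormExtra

form = SignupFormExtra(data={
    'username': 'alice', 'email': 'alice@example.com',
    'password1': 'p4ssw0rd!', 'password2': 'p4ssw0rd!',
    'first_name': 'Alice', 'last_name': 'Doe',
})
if form.is_valid():
    user = form.save()      # the parent save() creates the user,
    print(user.first_name)  # then the override copies both names onto it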
|
15,243
|
django-userena-ce/django-userena-ce
|
userena/contrib/umessages/forms.py
|
ComposeForm.save
|
def save(self, sender):
"""
Save the message and send it out into the wide world.
:param sender:
The :class:`User` that sends the message.
:return: The saved :class:`Message`.
"""
um_to_user_list = self.cleaned_data['to']
body = self.cleaned_data['body']
msg = Message.objects.send_message(sender,
um_to_user_list,
body)
return msg
|
python
|
def save(self, sender):
"""
Save the message and send it out into the wide world.
:param sender:
The :class:`User` that sends the message.
:return: The saved :class:`Message`.
"""
um_to_user_list = self.cleaned_data['to']
body = self.cleaned_data['body']
msg = Message.objects.send_message(sender,
um_to_user_list,
body)
return msg
|
[
"def",
"save",
"(",
"self",
",",
"sender",
")",
":",
"um_to_user_list",
"=",
"self",
".",
"cleaned_data",
"[",
"'to'",
"]",
"body",
"=",
"self",
".",
"cleaned_data",
"[",
"'body'",
"]",
"msg",
"=",
"Message",
".",
"objects",
".",
"send_message",
"(",
"sender",
",",
"um_to_user_list",
",",
"body",
")",
"return",
"msg"
] |
Save the message and send it out into the wide world.
:param sender:
The :class:`User` that sends the message.
:return: The saved :class:`Message`.
|
[
"Save",
"the",
"message",
"and",
"send",
"it",
"out",
"into",
"the",
"wide",
"world",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/contrib/umessages/forms.py#L15-L35
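Usage sketch (editorial addition; the user lookup and the 'to' value the form accepts are assumptions):

from django.contrib.auth import get_user_model
from userena.contrib.umessages.forms import ComposeForm

sender = get_user_model().objects.get(username='alice')  # assumed user
form = ComposeForm(data={'to': 'bob', 'body': 'Hello!'})
if form.is_valid():
    msg = form.save(sender)  # returns the saved Message instance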
|
15,244
|
django-userena-ce/django-userena-ce
|
userena/forms.py
|
SignupForm.clean_username
|
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already in use.
Also validates that the username is not listed in
``USERENA_FORBIDDEN_USERNAMES`` list.
"""
try:
user = get_user_model().objects.get(username__iexact=self.cleaned_data['username'])
except get_user_model().DoesNotExist:
pass
else:
if userena_settings.USERENA_ACTIVATION_REQUIRED and UserenaSignup.objects.filter(user__username__iexact=self.cleaned_data['username']).exclude(activation_key=userena_settings.USERENA_ACTIVATED):
raise forms.ValidationError(_('This username is already taken but not confirmed. Please check your email for verification steps.'))
raise forms.ValidationError(_('This username is already taken.'))
if self.cleaned_data['username'].lower() in userena_settings.USERENA_FORBIDDEN_USERNAMES:
raise forms.ValidationError(_('This username is not allowed.'))
return self.cleaned_data['username']
|
python
|
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already in use.
Also validates that the username is not listed in
``USERENA_FORBIDDEN_USERNAMES`` list.
"""
try:
user = get_user_model().objects.get(username__iexact=self.cleaned_data['username'])
except get_user_model().DoesNotExist:
pass
else:
if userena_settings.USERENA_ACTIVATION_REQUIRED and UserenaSignup.objects.filter(user__username__iexact=self.cleaned_data['username']).exclude(activation_key=userena_settings.USERENA_ACTIVATED):
raise forms.ValidationError(_('This username is already taken but not confirmed. Please check your email for verification steps.'))
raise forms.ValidationError(_('This username is already taken.'))
if self.cleaned_data['username'].lower() in userena_settings.USERENA_FORBIDDEN_USERNAMES:
raise forms.ValidationError(_('This username is not allowed.'))
return self.cleaned_data['username']
|
[
"def",
"clean_username",
"(",
"self",
")",
":",
"try",
":",
"user",
"=",
"get_user_model",
"(",
")",
".",
"objects",
".",
"get",
"(",
"username__iexact",
"=",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]",
")",
"except",
"get_user_model",
"(",
")",
".",
"DoesNotExist",
":",
"pass",
"else",
":",
"if",
"userena_settings",
".",
"USERENA_ACTIVATION_REQUIRED",
"and",
"UserenaSignup",
".",
"objects",
".",
"filter",
"(",
"user__username__iexact",
"=",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]",
")",
".",
"exclude",
"(",
"activation_key",
"=",
"userena_settings",
".",
"USERENA_ACTIVATED",
")",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'This username is already taken but not confirmed. Please check your email for verification steps.'",
")",
")",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'This username is already taken.'",
")",
")",
"if",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]",
".",
"lower",
"(",
")",
"in",
"userena_settings",
".",
"USERENA_FORBIDDEN_USERNAMES",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'This username is not allowed.'",
")",
")",
"return",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]"
] |
Validate that the username is alphanumeric and is not already in use.
Also validates that the username is not listed in
``USERENA_FORBIDDEN_USERNAMES`` list.
|
[
"Validate",
"that",
"the",
"username",
"is",
"alphanumeric",
"and",
"is",
"not",
"already",
"in",
"use",
".",
"Also",
"validates",
"that",
"the",
"username",
"is",
"not",
"listed",
"in",
"USERENA_FORBIDDEN_USERNAMES",
"list",
"."
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/forms.py#L44-L61
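Behavior sketch (editorial addition; assumes 'admin' appears in the USERENA_FORBIDDEN_USERNAMES setting):

from userena.forms import SignupForm

form = SignupForm(data={'username': 'admin', 'email': 'a@example.com',
                        'password1': 'p4ssw0rd!', 'password2': 'p4ssw0rd!'})
form.is_valid()                      # False: clean_username rejects the name
print(form.errors.get('username'))   # ['This username is not allowed.']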
|
15,245
|
django-userena-ce/django-userena-ce
|
userena/forms.py
|
SignupFormOnlyEmail.save
|
def save(self):
""" Generate a random username before falling back to parent signup form """
while True:
username = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
try:
get_user_model().objects.get(username__iexact=username)
except get_user_model().DoesNotExist: break
self.cleaned_data['username'] = username
return super(SignupFormOnlyEmail, self).save()
|
python
|
def save(self):
""" Generate a random username before falling back to parent signup form """
while True:
username = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
try:
get_user_model().objects.get(username__iexact=username)
except get_user_model().DoesNotExist: break
self.cleaned_data['username'] = username
return super(SignupFormOnlyEmail, self).save()
|
[
"def",
"save",
"(",
"self",
")",
":",
"while",
"True",
":",
"username",
"=",
"sha1",
"(",
"str",
"(",
"random",
".",
"random",
"(",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
":",
"5",
"]",
"try",
":",
"get_user_model",
"(",
")",
".",
"objects",
".",
"get",
"(",
"username__iexact",
"=",
"username",
")",
"except",
"get_user_model",
"(",
")",
".",
"DoesNotExist",
":",
"break",
"self",
".",
"cleaned_data",
"[",
"'username'",
"]",
"=",
"username",
"return",
"super",
"(",
"SignupFormOnlyEmail",
",",
"self",
")",
".",
"save",
"(",
")"
] |
Generate a random username before falling back to parent signup form
|
[
"Generate",
"a",
"random",
"username",
"before",
"falling",
"back",
"to",
"parent",
"signup",
"form"
] |
2d8b745eed25128134e961ca96c270802e730256
|
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/forms.py#L110-L119
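The same idea restated as a standalone sketch with no Django dependency (editorial addition; the five-character truncation matches the method above, the rest is illustrative):

import random
from hashlib import sha1

def random_username(taken=frozenset()):
    """Retry until the 5-char hex name is not already taken."""
    while True:
        name = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
        if name not in taken:
            return name

print(random_username({'ab12c'}))  # e.g. '9f0d3'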
|
15,246
|
dogoncouch/logdissect
|
logdissect/parsers/linejson.py
|
ParseModule.parse_file
|
def parse_file(self, sourcepath):
"""Parse an object-per-line JSON file into a log data dict"""
# Open the input file and read it line by line (one JSON object each):
with open(sourcepath, 'r') as logfile:
jsonlist = logfile.readlines()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = []
for line in jsonlist:
entry = self.parse_line(line)
data['entries'].append(entry)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data
|
python
|
def parse_file(self, sourcepath):
"""Parse an object-per-line JSON file into a log data dict"""
# Open the input file and read it line by line (one JSON object each):
with open(sourcepath, 'r') as logfile:
jsonlist = logfile.readlines()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = []
for line in jsonlist:
entry = self.parse_line(line)
data['entries'].append(entry)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data
|
[
"def",
"parse_file",
"(",
"self",
",",
"sourcepath",
")",
":",
"# Open input file and read JSON array:",
"with",
"open",
"(",
"sourcepath",
",",
"'r'",
")",
"as",
"logfile",
":",
"jsonlist",
"=",
"logfile",
".",
"readlines",
"(",
")",
"# Set our attributes for this entry and add it to data.entries:",
"data",
"=",
"{",
"}",
"data",
"[",
"'entries'",
"]",
"=",
"[",
"]",
"for",
"line",
"in",
"jsonlist",
":",
"entry",
"=",
"self",
".",
"parse_line",
"(",
"line",
")",
"data",
"[",
"'entries'",
"]",
".",
"append",
"(",
"entry",
")",
"if",
"self",
".",
"tzone",
":",
"for",
"e",
"in",
"data",
"[",
"'entries'",
"]",
":",
"e",
"[",
"'tzone'",
"]",
"=",
"self",
".",
"tzone",
"# Return the parsed data",
"return",
"data"
] |
Parse an object-per-line JSON file into a log data dict
|
[
"Parse",
"an",
"object",
"-",
"per",
"-",
"line",
"JSON",
"file",
"into",
"a",
"log",
"data",
"dict"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/linejson.py#L41-L59
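Usage sketch (editorial addition; assumes ParseModule() needs no constructor arguments and that parse_line accepts these plain JSON objects):

import json
import tempfile
from logdissect.parsers.linejson import ParseModule

with tempfile.NamedTemporaryFile('w', suffix='.jsonl', delete=False) as f:
    f.write(json.dumps({'raw_text': 'first'}) + '\n')
    f.write(json.dumps({'raw_text': 'second'}) + '\n')
    path = f.name

data = ParseModule().parse_file(path)
print(len(data['entries']))  # 2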
|
15,247
|
dogoncouch/logdissect
|
logdissect/parsers/sojson.py
|
ParseModule.parse_file
|
def parse_file(self, sourcepath):
"""Parse single JSON object into a LogData object"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonstr = logfile.read()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = json.loads(jsonstr)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data
|
python
|
def parse_file(self, sourcepath):
"""Parse single JSON object into a LogData object"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonstr = logfile.read()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = json.loads(jsonstr)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data
|
[
"def",
"parse_file",
"(",
"self",
",",
"sourcepath",
")",
":",
"# Open input file and read JSON array:",
"with",
"open",
"(",
"sourcepath",
",",
"'r'",
")",
"as",
"logfile",
":",
"jsonstr",
"=",
"logfile",
".",
"read",
"(",
")",
"# Set our attributes for this entry and add it to data.entries:",
"data",
"=",
"{",
"}",
"data",
"[",
"'entries'",
"]",
"=",
"json",
".",
"loads",
"(",
"jsonstr",
")",
"if",
"self",
".",
"tzone",
":",
"for",
"e",
"in",
"data",
"[",
"'entries'",
"]",
":",
"e",
"[",
"'tzone'",
"]",
"=",
"self",
".",
"tzone",
"# Return the parsed data",
"return",
"data"
] |
Parse a single JSON object into a log data dict
|
[
"Parse",
"single",
"JSON",
"object",
"into",
"a",
"LogData",
"object"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/sojson.py#L41-L56
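Usage sketch (editorial addition; same zero-argument constructor assumption — here the file holds one JSON array of entry objects):

import json
import tempfile
from logdissect.parsers.sojson import ParseModule

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump([{'raw_text': 'one'}, {'raw_text': 'two'}], f)
    path = f.name

data = ParseModule().parse_file(path)  # one json.loads over the whole file
print(len(data['entries']))           # 2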
|
15,248
|
dogoncouch/logdissect
|
logdissect/core.py
|
LogDissectCore.run_job
|
def run_job(self):
"""Execute a logdissect job"""
try:
self.load_parsers()
self.load_filters()
self.load_outputs()
self.config_args()
if self.args.list_parsers:
self.list_parsers()
if self.args.verbosemode: print('Loading input files')
self.load_inputs()
if self.args.verbosemode: print('Running parsers')
self.run_parse()
if self.args.verbosemode: print('Merging data')
self.data_set['finalized_data'] = \
logdissect.utils.merge_logs(
self.data_set['data_set'], sort=True)
if self.args.verbosemode: print('Running filters')
self.run_filters()
if self.args.verbosemode: print('Running output')
self.run_output()
except KeyboardInterrupt:
sys.exit(1)
|
python
|
def run_job(self):
"""Execute a logdissect job"""
try:
self.load_parsers()
self.load_filters()
self.load_outputs()
self.config_args()
if self.args.list_parsers:
self.list_parsers()
if self.args.verbosemode: print('Loading input files')
self.load_inputs()
if self.args.verbosemode: print('Running parsers')
self.run_parse()
if self.args.verbosemode: print('Merging data')
self.data_set['finalized_data'] = \
logdissect.utils.merge_logs(
self.data_set['data_set'], sort=True)
if self.args.verbosemode: print('Running filters')
self.run_filters()
if self.args.verbosemode: print('Running output')
self.run_output()
except KeyboardInterrupt:
sys.exit(1)
|
[
"def",
"run_job",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"load_parsers",
"(",
")",
"self",
".",
"load_filters",
"(",
")",
"self",
".",
"load_outputs",
"(",
")",
"self",
".",
"config_args",
"(",
")",
"if",
"self",
".",
"args",
".",
"list_parsers",
":",
"self",
".",
"list_parsers",
"(",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Loading input files'",
")",
"self",
".",
"load_inputs",
"(",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Running parsers'",
")",
"self",
".",
"run_parse",
"(",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Merging data'",
")",
"self",
".",
"data_set",
"[",
"'finalized_data'",
"]",
"=",
"logdissect",
".",
"utils",
".",
"merge_logs",
"(",
"self",
".",
"data_set",
"[",
"'data_set'",
"]",
",",
"sort",
"=",
"True",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Running filters'",
")",
"self",
".",
"run_filters",
"(",
")",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'Running output'",
")",
"self",
".",
"run_output",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Execute a logdissect job
|
[
"Execute",
"a",
"logdissect",
"job"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L58-L80
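The pipeline order above maps directly onto a command-line run; a minimal sketch (editorial addition; the zero-argument constructor is an assumption):

# Roughly what `logdissect --verbose -p syslog messages.log` executes:
from logdissect.core import LogDissectCore

core = LogDissectCore()
core.run_job()  # load modules -> parse -> merge -> filter -> output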
|
15,249
|
dogoncouch/logdissect
|
logdissect/core.py
|
LogDissectCore.run_parse
|
def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset)
|
python
|
def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset)
|
[
"def",
"run_parse",
"(",
"self",
")",
":",
"# Data set already has source file names from load_inputs",
"parsedset",
"=",
"{",
"}",
"parsedset",
"[",
"'data_set'",
"]",
"=",
"[",
"]",
"for",
"log",
"in",
"self",
".",
"input_files",
":",
"parsemodule",
"=",
"self",
".",
"parse_modules",
"[",
"self",
".",
"args",
".",
"parser",
"]",
"try",
":",
"if",
"self",
".",
"args",
".",
"tzone",
":",
"parsemodule",
".",
"tzone",
"=",
"self",
".",
"args",
".",
"tzone",
"except",
"NameError",
":",
"pass",
"parsedset",
"[",
"'data_set'",
"]",
".",
"append",
"(",
"parsemodule",
".",
"parse_file",
"(",
"log",
")",
")",
"self",
".",
"data_set",
"=",
"parsedset",
"del",
"(",
"parsedset",
")"
] |
Parse one or more log files
|
[
"Parse",
"one",
"or",
"more",
"log",
"files"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L82-L95
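Shape sketch of what run_parse leaves in self.data_set (editorial addition; values are invented — one dict per input file, each produced by the selected parser's parse_file):

data_set = {
    'data_set': [
        {'entries': [{'raw_text': 'Jan  1 00:00:01 host app: started'}],
         'parser': 'syslog', 'source_path': '/var/log/messages'},
    ],
}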
|
15,250
|
dogoncouch/logdissect
|
logdissect/core.py
|
LogDissectCore.run_output
|
def run_output(self):
"""Output finalized data"""
for f in logdissect.output.__formats__:
ouroutput = self.output_modules[f]
ouroutput.write_output(self.data_set['finalized_data'],
args=self.args)
del(ouroutput)
# Output to terminal if silent mode is not set:
if not self.args.silentmode:
if self.args.verbosemode:
print('\n==== ++++ ==== Output: ==== ++++ ====\n')
for line in self.data_set['finalized_data']['entries']:
print(line['raw_text'])
|
python
|
def run_output(self):
"""Output finalized data"""
for f in logdissect.output.__formats__:
ouroutput = self.output_modules[f]
ouroutput.write_output(self.data_set['finalized_data'],
args=self.args)
del(ouroutput)
# Output to terminal if silent mode is not set:
if not self.args.silentmode:
if self.args.verbosemode:
print('\n==== ++++ ==== Output: ==== ++++ ====\n')
for line in self.data_set['finalized_data']['entries']:
print(line['raw_text'])
|
[
"def",
"run_output",
"(",
"self",
")",
":",
"for",
"f",
"in",
"logdissect",
".",
"output",
".",
"__formats__",
":",
"ouroutput",
"=",
"self",
".",
"output_modules",
"[",
"f",
"]",
"ouroutput",
".",
"write_output",
"(",
"self",
".",
"data_set",
"[",
"'finalized_data'",
"]",
",",
"args",
"=",
"self",
".",
"args",
")",
"del",
"(",
"ouroutput",
")",
"# Output to terminal if silent mode is not set:",
"if",
"not",
"self",
".",
"args",
".",
"silentmode",
":",
"if",
"self",
".",
"args",
".",
"verbosemode",
":",
"print",
"(",
"'\\n==== ++++ ==== Output: ==== ++++ ====\\n'",
")",
"for",
"line",
"in",
"self",
".",
"data_set",
"[",
"'finalized_data'",
"]",
"[",
"'entries'",
"]",
":",
"print",
"(",
"line",
"[",
"'raw_text'",
"]",
")"
] |
Output finalized data
|
[
"Output",
"finalized",
"data"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L107-L120
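A minimal sketch of the interface run_output expects from each module in logdissect.output.__formats__ (editorial addition; the class name is hypothetical):

class EchoOutput:
    def write_output(self, data, args=None):
        # Real modules check their own CLI flag on `args` and return
        # early when it is unset; this stub just prints every entry.
        for entry in data['entries']:
            print(entry['raw_text'])

EchoOutput().write_output({'entries': [{'raw_text': 'hello'}]})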
|
15,251
|
dogoncouch/logdissect
|
logdissect/core.py
|
LogDissectCore.config_args
|
def config_args(self):
"""Set config options"""
# Module list options:
self.arg_parser.add_argument('--version', action='version',
version='%(prog)s ' + str(__version__))
self.arg_parser.add_argument('--verbose',
action='store_true', dest = 'verbosemode',
help=_('set verbose terminal output'))
self.arg_parser.add_argument('-s',
action='store_true', dest = 'silentmode',
help=_('silence terminal output'))
self.arg_parser.add_argument('--list-parsers',
action='store_true', dest='list_parsers',
help=_('return a list of available parsers'))
self.arg_parser.add_argument('-p',
action='store', dest='parser', default='syslog',
help=_('select a parser (default: syslog)'))
self.arg_parser.add_argument('-z', '--unzip',
action='store_true', dest='unzip',
help=_('include files compressed with gzip'))
self.arg_parser.add_argument('-t',
action='store', dest='tzone',
help=_('specify timezone offset to UTC (e.g. \'+0500\')'))
self.arg_parser.add_argument('files',
# nargs needs to be * not + so --list-filters/etc
# will work without file arg
metavar='file', nargs='*',
help=_('specify input files'))
# self.arg_parser.add_argument_group(self.parse_args)
self.arg_parser.add_argument_group(self.filter_args)
self.arg_parser.add_argument_group(self.output_args)
self.args = self.arg_parser.parse_args()
|
python
|
def config_args(self):
"""Set config options"""
# Module list options:
self.arg_parser.add_argument('--version', action='version',
version='%(prog)s ' + str(__version__))
self.arg_parser.add_argument('--verbose',
action='store_true', dest = 'verbosemode',
help=_('set verbose terminal output'))
self.arg_parser.add_argument('-s',
action='store_true', dest = 'silentmode',
help=_('silence terminal output'))
self.arg_parser.add_argument('--list-parsers',
action='store_true', dest='list_parsers',
help=_('return a list of available parsers'))
self.arg_parser.add_argument('-p',
action='store', dest='parser', default='syslog',
help=_('select a parser (default: syslog)'))
self.arg_parser.add_argument('-z', '--unzip',
action='store_true', dest='unzip',
help=_('include files compressed with gzip'))
self.arg_parser.add_argument('-t',
action='store', dest='tzone',
help=_('specify timezone offset to UTC (e.g. \'+0500\')'))
self.arg_parser.add_argument('files',
# nargs needs to be * not + so --list-filters/etc
# will work without file arg
metavar='file', nargs='*',
help=_('specify input files'))
# self.arg_parser.add_argument_group(self.parse_args)
self.arg_parser.add_argument_group(self.filter_args)
self.arg_parser.add_argument_group(self.output_args)
self.args = self.arg_parser.parse_args()
|
[
"def",
"config_args",
"(",
"self",
")",
":",
"# Module list options:",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'%(prog)s '",
"+",
"str",
"(",
"__version__",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'--verbose'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verbosemode'",
",",
"help",
"=",
"_",
"(",
"'set verbose terminal output'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'-s'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'silentmode'",
",",
"help",
"=",
"_",
"(",
"'silence terminal output'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'--list-parsers'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'list_parsers'",
",",
"help",
"=",
"_",
"(",
"'return a list of available parsers'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'parser'",
",",
"default",
"=",
"'syslog'",
",",
"help",
"=",
"_",
"(",
"'select a parser (default: syslog)'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'-z'",
",",
"'--unzip'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'unzip'",
",",
"help",
"=",
"_",
"(",
"'include files compressed with gzip'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'-t'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'tzone'",
",",
"help",
"=",
"_",
"(",
"'specify timezone offset to UTC (e.g. \\'+0500\\')'",
")",
")",
"self",
".",
"arg_parser",
".",
"add_argument",
"(",
"'files'",
",",
"# nargs needs to be * not + so --list-filters/etc",
"# will work without file arg",
"metavar",
"=",
"'file'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"_",
"(",
"'specify input files'",
")",
")",
"# self.arg_parser.add_argument_group(self.parse_args)",
"self",
".",
"arg_parser",
".",
"add_argument_group",
"(",
"self",
".",
"filter_args",
")",
"self",
".",
"arg_parser",
".",
"add_argument_group",
"(",
"self",
".",
"output_args",
")",
"self",
".",
"args",
"=",
"self",
".",
"arg_parser",
".",
"parse_args",
"(",
")"
] |
Set config options
|
[
"Set",
"config",
"options"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L124-L156
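Standalone argparse sketch reproducing a subset of the options above, to show what self.args ends up holding (editorial addition):

import argparse

p = argparse.ArgumentParser()
p.add_argument('-p', action='store', dest='parser', default='syslog')
p.add_argument('-z', '--unzip', action='store_true', dest='unzip')
p.add_argument('files', metavar='file', nargs='*')  # '*' so file-less runs work

args = p.parse_args(['-z', 'auth.log.gz'])
print(args.parser, args.unzip, args.files)  # syslog True ['auth.log.gz']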
|
15,252
|
dogoncouch/logdissect
|
logdissect/core.py
|
LogDissectCore.load_inputs
|
def load_inputs(self):
"""Load the specified inputs"""
for f in self.args.files:
if os.path.isfile(f):
fparts = str(f).split('.')
if fparts[-1] == 'gz':
if self.args.unzip:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
return 0
elif fparts[-1] == 'bz2' or fparts[-1] == 'zip':
return 0
else:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
print('File '+ f + ' not found')
return 1
|
python
|
def load_inputs(self):
"""Load the specified inputs"""
for f in self.args.files:
if os.path.isfile(f):
fparts = str(f).split('.')
if fparts[-1] == 'gz':
if self.args.unzip:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
return 0
elif fparts[-1] == 'bz2' or fparts[-1] == 'zip':
return 0
else:
fullpath = os.path.abspath(str(f))
self.input_files.append(fullpath)
else:
print('File '+ f + ' not found')
return 1
|
[
"def",
"load_inputs",
"(",
"self",
")",
":",
"for",
"f",
"in",
"self",
".",
"args",
".",
"files",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"fparts",
"=",
"str",
"(",
"f",
")",
".",
"split",
"(",
"'.'",
")",
"if",
"fparts",
"[",
"-",
"1",
"]",
"==",
"'gz'",
":",
"if",
"self",
".",
"args",
".",
"unzip",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"str",
"(",
"f",
")",
")",
"self",
".",
"input_files",
".",
"append",
"(",
"fullpath",
")",
"else",
":",
"return",
"0",
"elif",
"fparts",
"[",
"-",
"1",
"]",
"==",
"'bz2'",
"or",
"fparts",
"[",
"-",
"1",
"]",
"==",
"'zip'",
":",
"return",
"0",
"else",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"str",
"(",
"f",
")",
")",
"self",
".",
"input_files",
".",
"append",
"(",
"fullpath",
")",
"else",
":",
"print",
"(",
"'File '",
"+",
"f",
"+",
"' not found'",
")",
"return",
"1"
] |
Load the specified inputs
|
[
"Load",
"the",
"specified",
"inputs"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L161-L179
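The extension gate, restated as a pure per-file predicate (editorial addition; note the original returns early for the whole run rather than skipping one file):

def accepts(path, unzip=False):
    ext = path.split('.')[-1]
    if ext == 'gz':
        return unzip                    # gzip only with -z/--unzip
    return ext not in ('bz2', 'zip')    # bzip2/zip are never accepted

print(accepts('syslog.gz'), accepts('syslog.gz', unzip=True))  # False True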
|
15,253
|
dogoncouch/logdissect
|
logdissect/core.py
|
LogDissectCore.list_parsers
|
def list_parsers(self, *args):
"""Return a list of available parsing modules"""
print('==== Available parsing modules: ====\n')
for parser in sorted(self.parse_modules):
print(self.parse_modules[parser].name.ljust(16) + \
': ' + self.parse_modules[parser].desc)
sys.exit(0)
|
python
|
def list_parsers(self, *args):
"""Return a list of available parsing modules"""
print('==== Available parsing modules: ====\n')
for parser in sorted(self.parse_modules):
print(self.parse_modules[parser].name.ljust(16) + \
': ' + self.parse_modules[parser].desc)
sys.exit(0)
|
[
"def",
"list_parsers",
"(",
"self",
",",
"*",
"args",
")",
":",
"print",
"(",
"'==== Available parsing modules: ====\\n'",
")",
"for",
"parser",
"in",
"sorted",
"(",
"self",
".",
"parse_modules",
")",
":",
"print",
"(",
"self",
".",
"parse_modules",
"[",
"parser",
"]",
".",
"name",
".",
"ljust",
"(",
"16",
")",
"+",
"': '",
"+",
"self",
".",
"parse_modules",
"[",
"parser",
"]",
".",
"desc",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] |
Print a list of available parsing modules and exit
|
[
"Return",
"a",
"list",
"of",
"available",
"parsing",
"modules"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L182-L188
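Formatting sketch of the listing layout (editorial addition; module names and descriptions are invented):

modules = {'syslog': 'syslog-style log files',
           'sojson': 'single JSON object output'}
for name in sorted(modules):
    print(name.ljust(16) + ': ' + modules[name])
# sojson          : single JSON object output
# syslog          : syslog-style log files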
|
15,254
|
dogoncouch/logdissect
|
logdissect/utils.py
|
get_utc_date
|
def get_utc_date(entry):
"""Return datestamp converted to UTC"""
if entry['numeric_date_stamp'] == '0':
entry['numeric_date_stamp_utc'] = '0'
return entry
else:
if '.' in entry['numeric_date_stamp']:
t = datetime.strptime(entry['numeric_date_stamp'],
'%Y%m%d%H%M%S.%f')
else:
t = datetime.strptime(entry['numeric_date_stamp'],
'%Y%m%d%H%M%S')
tdelta = timedelta(hours = int(entry['tzone'][1:3]),
minutes = int(entry['tzone'][3:5]))
if entry['tzone'][0] == '-':
ut = t + tdelta
else:
ut = t - tdelta
entry['numeric_date_stamp_utc'] = ut.strftime('%Y%m%d%H%M%S.%f')
return entry
|
python
|
def get_utc_date(entry):
"""Return datestamp converted to UTC"""
if entry['numeric_date_stamp'] == '0':
entry['numeric_date_stamp_utc'] = '0'
return entry
else:
if '.' in entry['numeric_date_stamp']:
t = datetime.strptime(entry['numeric_date_stamp'],
'%Y%m%d%H%M%S.%f')
else:
t = datetime.strptime(entry['numeric_date_stamp'],
'%Y%m%d%H%M%S')
tdelta = timedelta(hours = int(entry['tzone'][1:3]),
minutes = int(entry['tzone'][3:5]))
if entry['tzone'][0] == '-':
ut = t + tdelta
else:
ut = t - tdelta
entry['numeric_date_stamp_utc'] = ut.strftime('%Y%m%d%H%M%S.%f')
return entry
|
[
"def",
"get_utc_date",
"(",
"entry",
")",
":",
"if",
"entry",
"[",
"'numeric_date_stamp'",
"]",
"==",
"'0'",
":",
"entry",
"[",
"'numeric_date_stamp_utc'",
"]",
"=",
"'0'",
"return",
"entry",
"else",
":",
"if",
"'.'",
"in",
"entry",
"[",
"'numeric_date_stamp'",
"]",
":",
"t",
"=",
"datetime",
".",
"strptime",
"(",
"entry",
"[",
"'numeric_date_stamp'",
"]",
",",
"'%Y%m%d%H%M%S.%f'",
")",
"else",
":",
"t",
"=",
"datetime",
".",
"strptime",
"(",
"entry",
"[",
"'numeric_date_stamp'",
"]",
",",
"'%Y%m%d%H%M%S'",
")",
"tdelta",
"=",
"timedelta",
"(",
"hours",
"=",
"int",
"(",
"entry",
"[",
"'tzone'",
"]",
"[",
"1",
":",
"3",
"]",
")",
",",
"minutes",
"=",
"int",
"(",
"entry",
"[",
"'tzone'",
"]",
"[",
"3",
":",
"5",
"]",
")",
")",
"if",
"entry",
"[",
"'tzone'",
"]",
"[",
"0",
"]",
"==",
"'-'",
":",
"ut",
"=",
"t",
"+",
"tdelta",
"else",
":",
"ut",
"=",
"t",
"-",
"tdelta",
"entry",
"[",
"'numeric_date_stamp_utc'",
"]",
"=",
"ut",
".",
"strftime",
"(",
"'%Y%m%d%H%M%S.%f'",
")",
"return",
"entry"
] |
Return datestamp converted to UTC
|
[
"Return",
"datestamp",
"converted",
"to",
"UTC"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/utils.py#L145-L168
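Worked sketch (editorial addition): an entry stamped 12:00 with tzone '-0500' lands five hours later in UTC, per the sign handling above.

from logdissect.utils import get_utc_date

entry = {'numeric_date_stamp': '20190301120000', 'tzone': '-0500'}
entry = get_utc_date(entry)
print(entry['numeric_date_stamp_utc'])  # 20190301170000.000000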
|
15,255
|
dogoncouch/logdissect
|
logdissect/utils.py
|
get_local_tzone
|
def get_local_tzone():
"""Get the current time zone on the local host"""
if localtime().tm_isdst:
if altzone < 0:
tzone = '+' + \
str(int(float(altzone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
altzone) / 60 % 60)).ljust(2, '0')
else:
tzone = '-' + \
str(int(float(altzone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
altzone) / 60 % 60)).ljust(2, '0')
else:
if altzone < 0:
tzone = \
'+' + str(int(float(timezone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
timezone) / 60 % 60)).ljust(2, '0')
else:
tzone = \
'-' + str(int(float(timezone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
timezone) / 60 % 60)).ljust(2, '0')
return tzone
|
python
|
def get_local_tzone():
"""Get the current time zone on the local host"""
if localtime().tm_isdst:
if altzone < 0:
tzone = '+' + \
str(int(float(altzone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
altzone) / 60 % 60)).ljust(2, '0')
else:
tzone = '-' + \
str(int(float(altzone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
altzone) / 60 % 60)).ljust(2, '0')
else:
if altzone < 0:
tzone = \
'+' + str(int(float(timezone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
timezone) / 60 % 60)).ljust(2, '0')
else:
tzone = \
'-' + str(int(float(timezone) / 60 // 60)).rjust(2,
'0') + \
str(int(float(
timezone) / 60 % 60)).ljust(2, '0')
return tzone
|
[
"def",
"get_local_tzone",
"(",
")",
":",
"if",
"localtime",
"(",
")",
".",
"tm_isdst",
":",
"if",
"altzone",
"<",
"0",
":",
"tzone",
"=",
"'+'",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"altzone",
")",
"/",
"60",
"//",
"60",
")",
")",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"altzone",
")",
"/",
"60",
"%",
"60",
")",
")",
".",
"ljust",
"(",
"2",
",",
"'0'",
")",
"else",
":",
"tzone",
"=",
"'-'",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"altzone",
")",
"/",
"60",
"//",
"60",
")",
")",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"altzone",
")",
"/",
"60",
"%",
"60",
")",
")",
".",
"ljust",
"(",
"2",
",",
"'0'",
")",
"else",
":",
"if",
"altzone",
"<",
"0",
":",
"tzone",
"=",
"'+'",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"timezone",
")",
"/",
"60",
"//",
"60",
")",
")",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"timezone",
")",
"/",
"60",
"%",
"60",
")",
")",
".",
"ljust",
"(",
"2",
",",
"'0'",
")",
"else",
":",
"tzone",
"=",
"'-'",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"timezone",
")",
"/",
"60",
"//",
"60",
")",
")",
".",
"rjust",
"(",
"2",
",",
"'0'",
")",
"+",
"str",
"(",
"int",
"(",
"float",
"(",
"timezone",
")",
"/",
"60",
"%",
"60",
")",
")",
".",
"ljust",
"(",
"2",
",",
"'0'",
")",
"return",
"tzone"
] |
Get the current time zone on the local host
|
[
"Get",
"the",
"current",
"time",
"zone",
"on",
"the",
"local",
"host"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/utils.py#L171-L200
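Offset arithmetic sketch (editorial addition): with time.timezone = 18000 — five hours west of UTC, DST off — the string assembles as follows.

timezone = 18000  # seconds west of UTC, as in the time module
hours = str(int(float(timezone) / 60 // 60)).rjust(2, '0')   # '05'
minutes = str(int(float(timezone) / 60 % 60)).ljust(2, '0')  # '00'
print('-' + hours + minutes)  # -0500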
|
15,256
|
dogoncouch/logdissect
|
logdissect/utils.py
|
merge_logs
|
def merge_logs(dataset, sort=True):
"""Merge log dictionaries together into one log dictionary"""
ourlog = {}
ourlog['entries'] = []
for d in dataset:
ourlog['entries'] = ourlog['entries'] + d['entries']
if sort:
ourlog['entries'].sort(key= lambda x: x['numeric_date_stamp_utc'])
return ourlog
|
python
|
def merge_logs(dataset, sort=True):
"""Merge log dictionaries together into one log dictionary"""
ourlog = {}
ourlog['entries'] = []
for d in dataset:
ourlog['entries'] = ourlog['entries'] + d['entries']
if sort:
ourlog['entries'].sort(key= lambda x: x['numeric_date_stamp_utc'])
return ourlog
|
[
"def",
"merge_logs",
"(",
"dataset",
",",
"sort",
"=",
"True",
")",
":",
"ourlog",
"=",
"{",
"}",
"ourlog",
"[",
"'entries'",
"]",
"=",
"[",
"]",
"for",
"d",
"in",
"dataset",
":",
"ourlog",
"[",
"'entries'",
"]",
"=",
"ourlog",
"[",
"'entries'",
"]",
"+",
"d",
"[",
"'entries'",
"]",
"if",
"sort",
":",
"ourlog",
"[",
"'entries'",
"]",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'numeric_date_stamp_utc'",
"]",
")",
"return",
"ourlog"
] |
Merge log dictionaries together into one log dictionary
|
[
"Merge",
"log",
"dictionaries",
"together",
"into",
"one",
"log",
"dictionary"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/utils.py#L203-L212
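Usage sketch with two single-entry logs, trimmed to the key the sort uses (editorial addition):

from logdissect.utils import merge_logs

a = {'entries': [{'numeric_date_stamp_utc': '20190101120000.000000',
                  'raw_text': 'later'}]}
b = {'entries': [{'numeric_date_stamp_utc': '20190101110000.000000',
                  'raw_text': 'earlier'}]}
merged = merge_logs([a, b])
print([e['raw_text'] for e in merged['entries']])  # ['earlier', 'later']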
|
15,257
|
dogoncouch/logdissect
|
logdissect/output/log.py
|
OutputModule.write_output
|
def write_output(self, data, args=None, filename=None, label=None):
"""Write log data to a log file"""
if args:
if not args.outlog:
return 0
if not filename: filename=args.outlog
lastpath = ''
with open(str(filename), 'w') as output_file:
for entry in data['entries']:
if args.label:
if entry['source_path'] == lastpath:
output_file.write(entry['raw_text'] + '\n')
elif args.label == 'fname':
output_file.write('======== ' + \
entry['source_path'].split('/')[-1] + \
' >>>>\n' + entry['raw_text'] + '\n')
elif args.label == 'fpath':
output_file.write('======== ' + \
entry['source_path'] + \
' >>>>\n' + entry['raw_text'] + '\n')
else: output_file.write(entry['raw_text'] + '\n')
lastpath = entry['source_path']
|
python
|
def write_output(self, data, args=None, filename=None, label=None):
"""Write log data to a log file"""
if args:
if not args.outlog:
return 0
if not filename: filename=args.outlog
lastpath = ''
with open(str(filename), 'w') as output_file:
for entry in data['entries']:
if args.label:
if entry['source_path'] == lastpath:
output_file.write(entry['raw_text'] + '\n')
elif args.label == 'fname':
output_file.write('======== ' + \
entry['source_path'].split('/')[-1] + \
' >>>>\n' + entry['raw_text'] + '\n')
elif args.label == 'fpath':
output_file.write('======== ' + \
entry['source_path'] + \
' >>>>\n' + entry['raw_text'] + '\n')
else: output_file.write(entry['raw_text'] + '\n')
lastpath = entry['source_path']
|
[
"def",
"write_output",
"(",
"self",
",",
"data",
",",
"args",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"label",
"=",
"None",
")",
":",
"if",
"args",
":",
"if",
"not",
"args",
".",
"outlog",
":",
"return",
"0",
"if",
"not",
"filename",
":",
"filename",
"=",
"args",
".",
"outlog",
"lastpath",
"=",
"''",
"with",
"open",
"(",
"str",
"(",
"filename",
")",
",",
"'w'",
")",
"as",
"output_file",
":",
"for",
"entry",
"in",
"data",
"[",
"'entries'",
"]",
":",
"if",
"args",
".",
"label",
":",
"if",
"entry",
"[",
"'source_path'",
"]",
"==",
"lastpath",
":",
"output_file",
".",
"write",
"(",
"entry",
"[",
"'raw_text'",
"]",
"+",
"'\\n'",
")",
"elif",
"args",
".",
"label",
"==",
"'fname'",
":",
"output_file",
".",
"write",
"(",
"'======== '",
"+",
"entry",
"[",
"'source_path'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"+",
"' >>>>\\n'",
"+",
"entry",
"[",
"'raw_text'",
"]",
"+",
"'\\n'",
")",
"elif",
"args",
".",
"label",
"==",
"'fpath'",
":",
"output_file",
".",
"write",
"(",
"'======== '",
"+",
"entry",
"[",
"'source_path'",
"]",
"+",
"' >>>>\\n'",
"+",
"entry",
"[",
"'raw_text'",
"]",
"+",
"'\\n'",
")",
"else",
":",
"output_file",
".",
"write",
"(",
"entry",
"[",
"'raw_text'",
"]",
"+",
"'\\n'",
")",
"lastpath",
"=",
"entry",
"[",
"'source_path'",
"]"
] |
Write log data to a log file
|
[
"Write",
"log",
"data",
"to",
"a",
"log",
"file"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/output/log.py#L37-L58
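Usage sketch (editorial addition; a SimpleNamespace stands in for the parsed CLI args, and the zero-argument constructor is an assumption):

from types import SimpleNamespace
from logdissect.output.log import OutputModule

data = {'entries': [{'raw_text': 'one', 'source_path': '/var/log/a'},
                    {'raw_text': 'two', 'source_path': '/var/log/a'}]}
args = SimpleNamespace(outlog='out.log', label='fname')
OutputModule().write_output(data, args=args)
# out.log gets one '======== a >>>>' header, then both raw lines.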
|
15,258
|
dogoncouch/logdissect
|
logdissect/output/sojson.py
|
OutputModule.write_output
|
def write_output(self, data, args=None, filename=None, pretty=False):
"""Write log data to a single JSON object"""
if args:
if not args.sojson:
return 0
pretty = args.pretty
if not filename: filename = args.sojson
if pretty:
logstring = json.dumps(
data['entries'], indent=2, sort_keys=True,
separators=(',', ': '))
else:
logstring = json.dumps(data['entries'], sort_keys=True)
with open(str(filename), 'w') as output_file:
output_file.write(logstring)
|
python
|
def write_output(self, data, args=None, filename=None, pretty=False):
"""Write log data to a single JSON object"""
if args:
if not args.sojson:
return 0
pretty = args.pretty
if not filename: filename = args.sojson
if pretty:
logstring = json.dumps(
data['entries'], indent=2, sort_keys=True,
separators=(',', ': '))
else:
logstring = json.dumps(data['entries'], sort_keys=True)
with open(str(filename), 'w') as output_file:
output_file.write(logstring)
|
[
"def",
"write_output",
"(",
"self",
",",
"data",
",",
"args",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"pretty",
"=",
"False",
")",
":",
"if",
"args",
":",
"if",
"not",
"args",
".",
"sojson",
":",
"return",
"0",
"pretty",
"=",
"args",
".",
"pretty",
"if",
"not",
"filename",
":",
"filename",
"=",
"args",
".",
"sojson",
"if",
"pretty",
":",
"logstring",
"=",
"json",
".",
"dumps",
"(",
"data",
"[",
"'entries'",
"]",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"else",
":",
"logstring",
"=",
"json",
".",
"dumps",
"(",
"data",
"[",
"'entries'",
"]",
",",
"sort_keys",
"=",
"True",
")",
"with",
"open",
"(",
"str",
"(",
"filename",
")",
",",
"'w'",
")",
"as",
"output_file",
":",
"output_file",
".",
"write",
"(",
"logstring",
")"
] |
Write log data to a single JSON object
|
[
"Write",
"log",
"data",
"to",
"a",
"single",
"JSON",
"object"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/output/sojson.py#L38-L53
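Usage sketch (editorial addition; same stub-args approach — pretty=True routes through the indented json.dumps branch):

from types import SimpleNamespace
from logdissect.output.sojson import OutputModule

data = {'entries': [{'raw_text': 'one'}, {'raw_text': 'two'}]}
args = SimpleNamespace(sojson='out.json', pretty=True)
OutputModule().write_output(data, args=args)  # writes one indented JSON array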
|
15,259
|
dogoncouch/logdissect
|
logdissect/output/linejson.py
|
OutputModule.write_output
|
def write_output(self, data, filename=None, args=None):
"""Write log data to a file with one JSON object per line"""
if args:
if not args.linejson:
return 0
if not filename: filename = args.linejson
entrylist = []
for entry in data['entries']:
entrystring = json.dumps(entry, sort_keys=True)
entrylist.append(entrystring)
with open(str(filename), 'w') as output_file:
output_file.write('\n'.join(entrylist))
|
python
|
def write_output(self, data, filename=None, args=None):
"""Write log data to a file with one JSON object per line"""
if args:
if not args.linejson:
return 0
if not filename: filename = args.linejson
entrylist = []
for entry in data['entries']:
entrystring = json.dumps(entry, sort_keys=True)
entrylist.append(entrystring)
with open(str(filename), 'w') as output_file:
output_file.write('\n'.join(entrylist))
|
[
"def",
"write_output",
"(",
"self",
",",
"data",
",",
"filename",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"if",
"args",
":",
"if",
"not",
"args",
".",
"linejson",
":",
"return",
"0",
"if",
"not",
"filename",
":",
"filename",
"=",
"args",
".",
"linejson",
"entrylist",
"=",
"[",
"]",
"for",
"entry",
"in",
"data",
"[",
"'entries'",
"]",
":",
"entrystring",
"=",
"json",
".",
"dumps",
"(",
"entry",
",",
"sort_keys",
"=",
"True",
")",
"entrylist",
".",
"append",
"(",
"entrystring",
")",
"with",
"open",
"(",
"str",
"(",
"filename",
")",
",",
"'w'",
")",
"as",
"output_file",
":",
"output_file",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"entrylist",
")",
")"
] |
Write log data to a file with one JSON object per line
|
[
"Write",
"log",
"data",
"to",
"a",
"file",
"with",
"one",
"JSON",
"object",
"per",
"line"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/output/linejson.py#L36-L48
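Usage sketch (editorial addition; stub args again — each entry becomes its own line of JSON):

from types import SimpleNamespace
from logdissect.output.linejson import OutputModule

data = {'entries': [{'raw_text': 'one'}, {'raw_text': 'two'}]}
OutputModule().write_output(data, args=SimpleNamespace(linejson='out.jsonl'))
# out.jsonl:
#   {"raw_text": "one"}
#   {"raw_text": "two"}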
|
15,260
|
dogoncouch/logdissect
|
logdissect/parsers/type.py
|
ParseModule.parse_file
|
def parse_file(self, sourcepath):
"""Parse a file into a LogData object"""
# Get regex objects:
self.date_regex = re.compile(
r'{}'.format(self.format_regex))
if self.backup_format_regex:
self.backup_date_regex = re.compile(
r'{}'.format(self.backup_format_regex))
data = {}
data['entries'] = []
data['parser'] = self.name
data['source_path'] = sourcepath
data['source_file'] = sourcepath.split('/')[-1]
# Set our start year:
data['source_file_mtime'] = os.path.getmtime(data['source_path'])
timestamp = datetime.fromtimestamp(data['source_file_mtime'])
data['source_file_year'] = timestamp.year
entryyear = timestamp.year
currentmonth = '99'
if self.datestamp_type == 'nodate':
self.datedata = {}
self.datedata['timestamp'] = timestamp
self.datedata['entry_time'] = int(timestamp.strftime('%H%M%S'))
# Set our timezone
if not self.tzone:
self.backuptzone = logdissect.utils.get_local_tzone()
# Parsing works in reverse. This helps with multi-line entries,
# and logs that span multiple years (December to January shift).
# Get our lines:
fparts = sourcepath.split('.')
if fparts[-1] == 'gz':
with gzip.open(sourcepath, 'r') as logfile:
loglines = reversed(logfile.readlines())
else:
with open(str(sourcepath), 'r') as logfile:
loglines = reversed(logfile.readlines())
# Parse our lines:
for line in loglines:
ourline = line.rstrip()
# Send the line to self.parse_line
entry = self.parse_line(ourline)
if entry:
if 'date_stamp' in self.fields:
# Check for Dec-Jan jump and set the year:
if self.datestamp_type == 'standard':
if int(entry['month']) > int(currentmonth):
entryyear = entryyear - 1
currentmonth = entry['month']
entry['numeric_date_stamp'] = str(entryyear) \
+ entry['month'] + entry['day'] + \
entry['tstamp']
entry['year'] = str(entryyear)
if self.tzone:
entry['tzone'] = self.tzone
else:
entry['tzone'] = self.backuptzone
entry = logdissect.utils.get_utc_date(entry)
entry['raw_text'] = ourline
entry['source_path'] = data['source_path']
# Append current entry
data['entries'].append(entry)
else:
continue
# Write the entries to the log object
data['entries'].reverse()
return data
|
python
|
def parse_file(self, sourcepath):
"""Parse a file into a LogData object"""
# Get regex objects:
self.date_regex = re.compile(
r'{}'.format(self.format_regex))
if self.backup_format_regex:
self.backup_date_regex = re.compile(
r'{}'.format(self.backup_format_regex))
data = {}
data['entries'] = []
data['parser'] = self.name
data['source_path'] = sourcepath
data['source_file'] = sourcepath.split('/')[-1]
# Set our start year:
data['source_file_mtime'] = os.path.getmtime(data['source_path'])
timestamp = datetime.fromtimestamp(data['source_file_mtime'])
data['source_file_year'] = timestamp.year
entryyear = timestamp.year
currentmonth = '99'
if self.datestamp_type == 'nodate':
self.datedata = {}
self.datedata['timestamp'] = timestamp
self.datedata['entry_time'] = int(timestamp.strftime('%H%M%S'))
# Set our timezone
if not self.tzone:
self.backuptzone = logdissect.utils.get_local_tzone()
# Parsing works in reverse. This helps with multi-line entries,
# and logs that span multiple years (December to January shift).
# Get our lines:
fparts = sourcepath.split('.')
if fparts[-1] == 'gz':
with gzip.open(sourcepath, 'r') as logfile:
loglines = reversed(logfile.readlines())
else:
with open(str(sourcepath), 'r') as logfile:
loglines = reversed(logfile.readlines())
# Parse our lines:
for line in loglines:
ourline = line.rstrip()
# Send the line to self.parse_line
entry = self.parse_line(ourline)
if entry:
if 'date_stamp' in self.fields:
# Check for Dec-Jan jump and set the year:
if self.datestamp_type == 'standard':
if int(entry['month']) > int(currentmonth):
entryyear = entryyear - 1
currentmonth = entry['month']
entry['numeric_date_stamp'] = str(entryyear) \
+ entry['month'] + entry['day'] + \
entry['tstamp']
entry['year'] = str(entryyear)
if self.tzone:
entry['tzone'] = self.tzone
else:
entry['tzone'] = self.backuptzone
entry = logdissect.utils.get_utc_date(entry)
entry['raw_text'] = ourline
entry['source_path'] = data['source_path']
# Append current entry
data['entries'].append(entry)
else:
continue
# Write the entries to the log object
data['entries'].reverse()
return data
|
[
"def",
"parse_file",
"(",
"self",
",",
"sourcepath",
")",
":",
"# Get regex objects:",
"self",
".",
"date_regex",
"=",
"re",
".",
"compile",
"(",
"r'{}'",
".",
"format",
"(",
"self",
".",
"format_regex",
")",
")",
"if",
"self",
".",
"backup_format_regex",
":",
"self",
".",
"backup_date_regex",
"=",
"re",
".",
"compile",
"(",
"r'{}'",
".",
"format",
"(",
"self",
".",
"backup_format_regex",
")",
")",
"data",
"=",
"{",
"}",
"data",
"[",
"'entries'",
"]",
"=",
"[",
"]",
"data",
"[",
"'parser'",
"]",
"=",
"self",
".",
"name",
"data",
"[",
"'source_path'",
"]",
"=",
"sourcepath",
"data",
"[",
"'source_file'",
"]",
"=",
"sourcepath",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"# Set our start year:",
"data",
"[",
"'source_file_mtime'",
"]",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"data",
"[",
"'source_path'",
"]",
")",
"timestamp",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"data",
"[",
"'source_file_mtime'",
"]",
")",
"data",
"[",
"'source_file_year'",
"]",
"=",
"timestamp",
".",
"year",
"entryyear",
"=",
"timestamp",
".",
"year",
"currentmonth",
"=",
"'99'",
"if",
"self",
".",
"datestamp_type",
"==",
"'nodate'",
":",
"self",
".",
"datedata",
"=",
"{",
"}",
"self",
".",
"datedata",
"[",
"'timestamp'",
"]",
"=",
"timestamp",
"self",
".",
"datedata",
"[",
"'entry_time'",
"]",
"=",
"int",
"(",
"timestamp",
".",
"strftime",
"(",
"'%H%M%S'",
")",
")",
"# Set our timezone",
"if",
"not",
"self",
".",
"tzone",
":",
"self",
".",
"backuptzone",
"=",
"logdissect",
".",
"utils",
".",
"get_local_tzone",
"(",
")",
"# Parsing works in reverse. This helps with multi-line entries,",
"# and logs that span multiple years (December to January shift).",
"# Get our lines:",
"fparts",
"=",
"sourcepath",
".",
"split",
"(",
"'.'",
")",
"if",
"fparts",
"[",
"-",
"1",
"]",
"==",
"'gz'",
":",
"with",
"gzip",
".",
"open",
"(",
"sourcepath",
",",
"'r'",
")",
"as",
"logfile",
":",
"loglines",
"=",
"reversed",
"(",
"logfile",
".",
"readlines",
"(",
")",
")",
"else",
":",
"with",
"open",
"(",
"str",
"(",
"sourcepath",
")",
",",
"'r'",
")",
"as",
"logfile",
":",
"loglines",
"=",
"reversed",
"(",
"logfile",
".",
"readlines",
"(",
")",
")",
"# Parse our lines:",
"for",
"line",
"in",
"loglines",
":",
"ourline",
"=",
"line",
".",
"rstrip",
"(",
")",
"# Send the line to self.parse_line",
"entry",
"=",
"self",
".",
"parse_line",
"(",
"ourline",
")",
"if",
"entry",
":",
"if",
"'date_stamp'",
"in",
"self",
".",
"fields",
":",
"# Check for Dec-Jan jump and set the year:",
"if",
"self",
".",
"datestamp_type",
"==",
"'standard'",
":",
"if",
"int",
"(",
"entry",
"[",
"'month'",
"]",
")",
">",
"int",
"(",
"currentmonth",
")",
":",
"entryyear",
"=",
"entryyear",
"-",
"1",
"currentmonth",
"=",
"entry",
"[",
"'month'",
"]",
"entry",
"[",
"'numeric_date_stamp'",
"]",
"=",
"str",
"(",
"entryyear",
")",
"+",
"entry",
"[",
"'month'",
"]",
"+",
"entry",
"[",
"'day'",
"]",
"+",
"entry",
"[",
"'tstamp'",
"]",
"entry",
"[",
"'year'",
"]",
"=",
"str",
"(",
"entryyear",
")",
"if",
"self",
".",
"tzone",
":",
"entry",
"[",
"'tzone'",
"]",
"=",
"self",
".",
"tzone",
"else",
":",
"entry",
"[",
"'tzone'",
"]",
"=",
"self",
".",
"backuptzone",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"get_utc_date",
"(",
"entry",
")",
"entry",
"[",
"'raw_text'",
"]",
"=",
"ourline",
"entry",
"[",
"'source_path'",
"]",
"=",
"data",
"[",
"'source_path'",
"]",
"# Append current entry",
"data",
"[",
"'entries'",
"]",
".",
"append",
"(",
"entry",
")",
"else",
":",
"continue",
"# Write the entries to the log object",
"data",
"[",
"'entries'",
"]",
".",
"reverse",
"(",
")",
"return",
"data"
] |
Parse a file into a LogData object
|
[
"Parse",
"a",
"file",
"into",
"a",
"LogData",
"object"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/type.py#L46-L122
|
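The parse_file method above recovers the year for syslog-style datestamps that omit it: it walks the file newest-entry-first and decrements the year whenever the month number jumps upward, which in reverse order marks a January-to-December crossing. A self-contained sketch of just that heuristic follows; the helper name and inputs are illustrative, not part of logdissect.

# Minimal sketch of the year-rollover heuristic: iterate newest-to-oldest
# and step the year back whenever the month increases, which can only
# happen at a December/January boundary when reading in reverse.
def assign_years(months_newest_first, start_year):
    years = []
    year = start_year
    current_month = 99  # sentinel larger than any real month, as above
    for month in months_newest_first:
        if int(month) > current_month:
            year -= 1
        current_month = int(month)
        years.append(year)
    return years

# Entries running from March back into the previous December:
print(assign_years(['03', '01', '12', '11'], 2018))  # [2018, 2018, 2017, 2017]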
15,261
|
dogoncouch/logdissect
|
logdissect/parsers/type.py
|
ParseModule.parse_line
|
def parse_line(self, line):
"""Parse a line into a dictionary"""
match = re.findall(self.date_regex, line)
if match:
fields = self.fields
elif self.backup_format_regex and not match:
match = re.findall(self.backup_date_regex, line)
fields = self.backup_fields
if match:
entry = {}
entry['raw_text'] = line
entry['parser'] = self.name
matchlist = list(zip(fields, match[0]))
for f, v in matchlist:
entry[f] = v
if 'date_stamp' in entry.keys():
if self.datestamp_type == 'standard':
entry = logdissect.utils.convert_standard_datestamp(entry)
elif self.datestamp_type == 'iso':
entry = logdissect.utils.convert_iso_datestamp(
entry)
elif self.datestamp_type == 'webaccess':
entry = logdissect.utils.convert_webaccess_datestamp(
entry)
elif self.datestamp_type == 'nodate':
entry, self.datedata = \
logdissect.utils.convert_nodate_datestamp(
entry, self.datedata)
elif self.datestamp_type == 'unix':
entry = logdissect.utils.convert_unix_datestamp(
entry)
if self.datestamp_type == 'now':
entry = logdissect.utils.convert_now_datestamp(
entry)
entry = self.post_parse_action(entry)
return entry
else:
return None
|
python
|
def parse_line(self, line):
"""Parse a line into a dictionary"""
match = re.findall(self.date_regex, line)
if match:
fields = self.fields
elif self.backup_format_regex and not match:
match = re.findall(self.backup_date_regex, line)
fields = self.backup_fields
if match:
entry = {}
entry['raw_text'] = line
entry['parser'] = self.name
matchlist = list(zip(fields, match[0]))
for f, v in matchlist:
entry[f] = v
if 'date_stamp' in entry.keys():
if self.datestamp_type == 'standard':
entry = logdissect.utils.convert_standard_datestamp(entry)
elif self.datestamp_type == 'iso':
entry = logdissect.utils.convert_iso_datestamp(
entry)
elif self.datestamp_type == 'webaccess':
entry = logdissect.utils.convert_webaccess_datestamp(
entry)
elif self.datestamp_type == 'nodate':
entry, self.datedata = \
logdissect.utils.convert_nodate_datestamp(
entry, self.datedata)
elif self.datestamp_type == 'unix':
entry = logdissect.utils.convert_unix_datestamp(
entry)
if self.datestamp_type == 'now':
entry = logdissect.utils.convert_now_datestamp(
entry)
entry = self.post_parse_action(entry)
return entry
else:
return None
|
[
"def",
"parse_line",
"(",
"self",
",",
"line",
")",
":",
"match",
"=",
"re",
".",
"findall",
"(",
"self",
".",
"date_regex",
",",
"line",
")",
"if",
"match",
":",
"fields",
"=",
"self",
".",
"fields",
"elif",
"self",
".",
"backup_format_regex",
"and",
"not",
"match",
":",
"match",
"=",
"re",
".",
"findall",
"(",
"self",
".",
"backup_date_regex",
",",
"line",
")",
"fields",
"=",
"self",
".",
"backup_fields",
"if",
"match",
":",
"entry",
"=",
"{",
"}",
"entry",
"[",
"'raw_text'",
"]",
"=",
"line",
"entry",
"[",
"'parser'",
"]",
"=",
"self",
".",
"name",
"matchlist",
"=",
"list",
"(",
"zip",
"(",
"fields",
",",
"match",
"[",
"0",
"]",
")",
")",
"for",
"f",
",",
"v",
"in",
"matchlist",
":",
"entry",
"[",
"f",
"]",
"=",
"v",
"if",
"'date_stamp'",
"in",
"entry",
".",
"keys",
"(",
")",
":",
"if",
"self",
".",
"datestamp_type",
"==",
"'standard'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_standard_datestamp",
"(",
"entry",
")",
"elif",
"self",
".",
"datestamp_type",
"==",
"'iso'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_iso_datestamp",
"(",
"entry",
")",
"elif",
"self",
".",
"datestamp_type",
"==",
"'webaccess'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_webaccess_datestamp",
"(",
"entry",
")",
"elif",
"self",
".",
"datestamp_type",
"==",
"'nodate'",
":",
"entry",
",",
"self",
".",
"datedata",
"=",
"logdissect",
".",
"utils",
".",
"convert_nodate_datestamp",
"(",
"entry",
",",
"self",
".",
"datedata",
")",
"elif",
"self",
".",
"datestamp_type",
"==",
"'unix'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_unix_datestamp",
"(",
"entry",
")",
"if",
"self",
".",
"datestamp_type",
"==",
"'now'",
":",
"entry",
"=",
"logdissect",
".",
"utils",
".",
"convert_now_datestamp",
"(",
"entry",
")",
"entry",
"=",
"self",
".",
"post_parse_action",
"(",
"entry",
")",
"return",
"entry",
"else",
":",
"return",
"None"
] |
Parse a line into a dictionary
|
[
"Parse",
"a",
"line",
"into",
"a",
"dictionary"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/type.py#L125-L167
|
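parse_line above turns a regex match into an entry dict by zipping the capture groups against a list of field names. Below is a standalone sketch of the same pattern; the regex and field list are invented for illustration, since each concrete parser defines its own.

import re

# Hypothetical syslog-like format with five capture groups.
format_regex = r'^(\w{3}) (\d{2}) (\d{2}:\d{2}:\d{2}) (\S+) (.*)$'
fields = ['month', 'day', 'tstamp', 'source_host', 'message']
date_regex = re.compile(format_regex)

def parse_line(line):
    match = re.findall(date_regex, line)
    if not match:
        return None
    # findall with multiple groups yields a list of tuples; zip the first
    # tuple against the field names to build the entry dict.
    entry = dict(zip(fields, match[0]))
    entry['raw_text'] = line
    return entry

print(parse_line('Mar 02 13:04:05 myhost sshd[411]: session opened'))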
15,262
|
dogoncouch/logdissect
|
logdissect/parsers/tcpdump.py
|
ParseModule.post_parse_action
|
def post_parse_action(self, entry):
"""separate hosts and ports after entry is parsed"""
if 'source_host' in entry.keys():
host = self.ip_port_regex.findall(entry['source_host'])
if host:
hlist = host[0].split('.')
entry['source_host'] = '.'.join(hlist[:4])
entry['source_port'] = hlist[-1]
if 'dest_host' in entry.keys():
host = self.ip_port_regex.findall(entry['dest_host'])
if host:
hlist = host[0].split('.')
entry['dest_host'] = '.'.join(hlist[:4])
entry['dest_port'] = hlist[-1]
return entry
|
python
|
def post_parse_action(self, entry):
"""separate hosts and ports after entry is parsed"""
if 'source_host' in entry.keys():
host = self.ip_port_regex.findall(entry['source_host'])
if host:
hlist = host[0].split('.')
entry['source_host'] = '.'.join(hlist[:4])
entry['source_port'] = hlist[-1]
if 'dest_host' in entry.keys():
host = self.ip_port_regex.findall(entry['dest_host'])
if host:
hlist = host[0].split('.')
entry['dest_host'] = '.'.join(hlist[:4])
entry['dest_port'] = hlist[-1]
return entry
|
[
"def",
"post_parse_action",
"(",
"self",
",",
"entry",
")",
":",
"if",
"'source_host'",
"in",
"entry",
".",
"keys",
"(",
")",
":",
"host",
"=",
"self",
".",
"ip_port_regex",
".",
"findall",
"(",
"entry",
"[",
"'source_host'",
"]",
")",
"if",
"host",
":",
"hlist",
"=",
"host",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"entry",
"[",
"'source_host'",
"]",
"=",
"'.'",
".",
"join",
"(",
"hlist",
"[",
":",
"4",
"]",
")",
"entry",
"[",
"'source_port'",
"]",
"=",
"hlist",
"[",
"-",
"1",
"]",
"if",
"'dest_host'",
"in",
"entry",
".",
"keys",
"(",
")",
":",
"host",
"=",
"self",
".",
"ip_port_regex",
".",
"findall",
"(",
"entry",
"[",
"'dest_host'",
"]",
")",
"if",
"host",
":",
"hlist",
"=",
"host",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"entry",
"[",
"'dest_host'",
"]",
"=",
"'.'",
".",
"join",
"(",
"hlist",
"[",
":",
"4",
"]",
")",
"entry",
"[",
"'dest_port'",
"]",
"=",
"hlist",
"[",
"-",
"1",
"]",
"return",
"entry"
] |
separate hosts and ports after entry is parsed
|
[
"separate",
"hosts",
"and",
"ports",
"after",
"entry",
"is",
"parsed"
] |
426b50264cbfa9665c86df3781e1e415ba8dbbd3
|
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/tcpdump.py#L42-L57
|
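post_parse_action above splits tcpdump's dotted host.port notation (for example 10.0.0.1.22) into an address and a port. A standalone sketch follows; the regex is an assumption spelled out here because the record does not show how ip_port_regex is defined.

import re

# Assumed pattern: four dotted octets followed by a dotted port.
ip_port_regex = re.compile(r'\d+\.\d+\.\d+\.\d+\.\d+')

def split_host_port(value):
    found = ip_port_regex.findall(value)
    if not found:
        return value, None
    parts = found[0].split('.')
    return '.'.join(parts[:4]), parts[-1]

print(split_host_port('10.0.0.1.22'))  # ('10.0.0.1', '22')
print(split_host_port('localhost'))    # ('localhost', None)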
15,263
|
vtraag/louvain-igraph
|
src/functions.py
|
find_partition_multiplex
|
def find_partition_multiplex(graphs, partition_type, **kwargs):
""" Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
    List of graphs, one for each layer, to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
"""
n_layers = len(graphs)
partitions = []
layer_weights = [1]*n_layers
for graph in graphs:
partitions.append(partition_type(graph, **kwargs))
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights)
return partitions[0].membership, improvement
|
python
|
def find_partition_multiplex(graphs, partition_type, **kwargs):
""" Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
    List of graphs, one for each layer, to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
"""
n_layers = len(graphs)
partitions = []
layer_weights = [1]*n_layers
for graph in graphs:
partitions.append(partition_type(graph, **kwargs))
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights)
return partitions[0].membership, improvement
|
[
"def",
"find_partition_multiplex",
"(",
"graphs",
",",
"partition_type",
",",
"*",
"*",
"kwargs",
")",
":",
"n_layers",
"=",
"len",
"(",
"graphs",
")",
"partitions",
"=",
"[",
"]",
"layer_weights",
"=",
"[",
"1",
"]",
"*",
"n_layers",
"for",
"graph",
"in",
"graphs",
":",
"partitions",
".",
"append",
"(",
"partition_type",
"(",
"graph",
",",
"*",
"*",
"kwargs",
")",
")",
"optimiser",
"=",
"Optimiser",
"(",
")",
"improvement",
"=",
"optimiser",
".",
"optimise_partition_multiplex",
"(",
"partitions",
",",
"layer_weights",
")",
"return",
"partitions",
"[",
"0",
"]",
".",
"membership",
",",
"improvement"
] |
Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of graphs, one for each layer, to optimise.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
|
[
"Detect",
"communities",
"for",
"multiplex",
"graphs",
"."
] |
8de2c3bad736a9deea90b80f104d8444769d331f
|
https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/src/functions.py#L81-L136
|
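As the body above shows, the convenience wrapper amounts to building one partition per layer and optimising them jointly with equal layer weights. A hedged sketch of that manual equivalent, assuming the igraph and louvain packages are importable as in the docstring example:

import igraph as ig
import louvain

graphs = [ig.Graph.Lattice([100], 1), ig.Graph.Lattice([100], 1)]

# One partition per layer, all layers weighted 1, exactly as in the body.
partitions = [louvain.ModularityVertexPartition(g) for g in graphs]
optimiser = louvain.Optimiser()
improvement = optimiser.optimise_partition_multiplex(
    partitions, [1] * len(graphs))

# Membership is shared across layers, so any partition's membership will do.
print(partitions[0].membership[:10], improvement)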
15,264
|
vtraag/louvain-igraph
|
src/functions.py
|
find_partition_temporal
|
def find_partition_temporal(graphs, partition_type,
interslice_weight=1,
slice_attr='slice', vertex_id_attr='id',
edge_type_attr='type', weight_attr='weight',
**kwargs):
""" Detect communities for temporal graphs.
Each graph is considered to represent a time slice and does not necessarily
need to be defined on the same set of vertices. Nodes in two consecutive
slices are identified on the basis of the ``vertex_id_attr``, i.e. if two
nodes in two consecutive slices have an identical value of the
``vertex_id_attr`` they are coupled. The ``vertex_id_attr`` should hence be
unique in each slice. The nodes are then coupled with a weight of
``interslice_weight`` which is set in the edge attribute ``weight_attr``. No
weight is set if the ``interslice_weight`` is None (i.e. corresponding in
practice with a weight of 1). See :func:`time_slices_to_layers` for
a more detailed explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
    List of graphs, one for each time slice, to optimise.
partition_type : type of :class:`VertexPartition.MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
interslice_weight : float
The weight of the coupling between two consecutive time slices.
slice_attr : string
The vertex attribute to use for indicating the slice of a node.
vertex_id_attr : string
    The vertex attribute used to identify nodes.
edge_type_attr : string
The edge attribute to use for indicating the type of link (`interslice` or
`intraslice`).
weight_attr : string
The edge attribute used to indicate the weight.
**kwargs
Remaining keyword arguments, passed on to constructor of
``partition_type``.
Returns
-------
list of membership
list containing for each slice the membership vector.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
See Also
--------
:func:`time_slices_to_layers`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_1.vs['id'] = range(n)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> G_2.vs['id'] = range(n)
>>> membership, improvement = louvain.find_partition_temporal([G_1, G_2],
... louvain.ModularityVertexPartition,
... interslice_weight=1)
"""
# Create layers
G_layers, G_interslice, G = time_slices_to_layers(graphs,
interslice_weight,
slice_attr=slice_attr,
vertex_id_attr=vertex_id_attr,
edge_type_attr=edge_type_attr,
weight_attr=weight_attr)
# Optimise partitions
arg_dict = {}
if 'node_sizes' in partition_type.__init__.__code__.co_varnames:
arg_dict['node_sizes'] = 'node_size'
if 'weights' in partition_type.__init__.__code__.co_varnames:
arg_dict['weights'] = 'weight'
arg_dict.update(kwargs)
partitions = []
for H in G_layers:
arg_dict['graph'] = H
partitions.append(partition_type(**arg_dict))
# We can always take the same interslice partition, as this should have no
# cost in the optimisation.
partition_interslice = CPMVertexPartition(G_interslice, resolution_parameter=0,
node_sizes='node_size', weights=weight_attr)
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions + [partition_interslice])
# Transform results back into original form.
membership = {(v[slice_attr], v[vertex_id_attr]): m for v, m in zip(G.vs, partitions[0].membership)}
membership_time_slices = []
for slice_idx, H in enumerate(graphs):
membership_slice = [membership[(slice_idx, v[vertex_id_attr])] for v in H.vs]
membership_time_slices.append(list(membership_slice))
return membership_time_slices, improvement
|
python
|
def find_partition_temporal(graphs, partition_type,
interslice_weight=1,
slice_attr='slice', vertex_id_attr='id',
edge_type_attr='type', weight_attr='weight',
**kwargs):
""" Detect communities for temporal graphs.
Each graph is considered to represent a time slice and does not necessarily
need to be defined on the same set of vertices. Nodes in two consecutive
slices are identified on the basis of the ``vertex_id_attr``, i.e. if two
nodes in two consecutive slices have an identical value of the
``vertex_id_attr`` they are coupled. The ``vertex_id_attr`` should hence be
unique in each slice. The nodes are then coupled with a weight of
``interslice_weight`` which is set in the edge attribute ``weight_attr``. No
weight is set if the ``interslice_weight`` is None (i.e. corresponding in
practice with a weight of 1). See :func:`time_slices_to_layers` for
a more detailed explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
    List of graphs, one for each time slice, to optimise.
partition_type : type of :class:`VertexPartition.MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
interslice_weight : float
The weight of the coupling between two consecutive time slices.
slice_attr : string
The vertex attribute to use for indicating the slice of a node.
vertex_id_attr : string
    The vertex attribute used to identify nodes.
edge_type_attr : string
The edge attribute to use for indicating the type of link (`interslice` or
`intraslice`).
weight_attr : string
The edge attribute used to indicate the weight.
**kwargs
Remaining keyword arguments, passed on to constructor of
``partition_type``.
Returns
-------
list of membership
list containing for each slice the membership vector.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
See Also
--------
:func:`time_slices_to_layers`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_1.vs['id'] = range(n)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> G_2.vs['id'] = range(n)
>>> membership, improvement = louvain.find_partition_temporal([G_1, G_2],
... louvain.ModularityVertexPartition,
... interslice_weight=1)
"""
# Create layers
G_layers, G_interslice, G = time_slices_to_layers(graphs,
interslice_weight,
slice_attr=slice_attr,
vertex_id_attr=vertex_id_attr,
edge_type_attr=edge_type_attr,
weight_attr=weight_attr)
# Optimise partitions
arg_dict = {}
if 'node_sizes' in partition_type.__init__.__code__.co_varnames:
arg_dict['node_sizes'] = 'node_size'
if 'weights' in partition_type.__init__.__code__.co_varnames:
arg_dict['weights'] = 'weight'
arg_dict.update(kwargs)
partitions = []
for H in G_layers:
arg_dict['graph'] = H
partitions.append(partition_type(**arg_dict))
# We can always take the same interslice partition, as this should have no
# cost in the optimisation.
partition_interslice = CPMVertexPartition(G_interslice, resolution_parameter=0,
node_sizes='node_size', weights=weight_attr)
optimiser = Optimiser()
improvement = optimiser.optimise_partition_multiplex(partitions + [partition_interslice])
# Transform results back into original form.
membership = {(v[slice_attr], v[vertex_id_attr]): m for v, m in zip(G.vs, partitions[0].membership)}
membership_time_slices = []
for slice_idx, H in enumerate(graphs):
membership_slice = [membership[(slice_idx, v[vertex_id_attr])] for v in H.vs]
membership_time_slices.append(list(membership_slice))
return membership_time_slices, improvement
|
[
"def",
"find_partition_temporal",
"(",
"graphs",
",",
"partition_type",
",",
"interslice_weight",
"=",
"1",
",",
"slice_attr",
"=",
"'slice'",
",",
"vertex_id_attr",
"=",
"'id'",
",",
"edge_type_attr",
"=",
"'type'",
",",
"weight_attr",
"=",
"'weight'",
",",
"*",
"*",
"kwargs",
")",
":",
"# Create layers",
"G_layers",
",",
"G_interslice",
",",
"G",
"=",
"time_slices_to_layers",
"(",
"graphs",
",",
"interslice_weight",
",",
"slice_attr",
"=",
"slice_attr",
",",
"vertex_id_attr",
"=",
"vertex_id_attr",
",",
"edge_type_attr",
"=",
"edge_type_attr",
",",
"weight_attr",
"=",
"weight_attr",
")",
"# Optimise partitions",
"arg_dict",
"=",
"{",
"}",
"if",
"'node_sizes'",
"in",
"partition_type",
".",
"__init__",
".",
"__code__",
".",
"co_varnames",
":",
"arg_dict",
"[",
"'node_sizes'",
"]",
"=",
"'node_size'",
"if",
"'weights'",
"in",
"partition_type",
".",
"__init__",
".",
"__code__",
".",
"co_varnames",
":",
"arg_dict",
"[",
"'weights'",
"]",
"=",
"'weight'",
"arg_dict",
".",
"update",
"(",
"kwargs",
")",
"partitions",
"=",
"[",
"]",
"for",
"H",
"in",
"G_layers",
":",
"arg_dict",
"[",
"'graph'",
"]",
"=",
"H",
"partitions",
".",
"append",
"(",
"partition_type",
"(",
"*",
"*",
"arg_dict",
")",
")",
"# We can always take the same interslice partition, as this should have no",
"# cost in the optimisation.",
"partition_interslice",
"=",
"CPMVertexPartition",
"(",
"G_interslice",
",",
"resolution_parameter",
"=",
"0",
",",
"node_sizes",
"=",
"'node_size'",
",",
"weights",
"=",
"weight_attr",
")",
"optimiser",
"=",
"Optimiser",
"(",
")",
"improvement",
"=",
"optimiser",
".",
"optimise_partition_multiplex",
"(",
"partitions",
"+",
"[",
"partition_interslice",
"]",
")",
"# Transform results back into original form.",
"membership",
"=",
"{",
"(",
"v",
"[",
"slice_attr",
"]",
",",
"v",
"[",
"vertex_id_attr",
"]",
")",
":",
"m",
"for",
"v",
",",
"m",
"in",
"zip",
"(",
"G",
".",
"vs",
",",
"partitions",
"[",
"0",
"]",
".",
"membership",
")",
"}",
"membership_time_slices",
"=",
"[",
"]",
"for",
"slice_idx",
",",
"H",
"in",
"enumerate",
"(",
"graphs",
")",
":",
"membership_slice",
"=",
"[",
"membership",
"[",
"(",
"slice_idx",
",",
"v",
"[",
"vertex_id_attr",
"]",
")",
"]",
"for",
"v",
"in",
"H",
".",
"vs",
"]",
"membership_time_slices",
".",
"append",
"(",
"list",
"(",
"membership_slice",
")",
")",
"return",
"membership_time_slices",
",",
"improvement"
] |
Detect communities for temporal graphs.
Each graph is considered to represent a time slice and does not necessarily
need to be defined on the same set of vertices. Nodes in two consecutive
slices are identified on the basis of the ``vertex_id_attr``, i.e. if two
nodes in two consecutive slices have an identical value of the
``vertex_id_attr`` they are coupled. The ``vertex_id_attr`` should hence be
unique in each slice. The nodes are then coupled with a weight of
``interslice_weight`` which is set in the edge attribute ``weight_attr``. No
weight is set if the ``interslice_weight`` is None (i.e. corresponding in
practice with a weight of 1). See :func:`time_slices_to_layers` for
a more detailed explanation.
Parameters
----------
graphs : list of :class:`ig.Graph`
List of graphs, one for each time slice, to optimise.
partition_type : type of :class:`VertexPartition.MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
interslice_weight : float
The weight of the coupling between two consecutive time slices.
slice_attr : string
The vertex attribute to use for indicating the slice of a node.
vertex_id_attr : string
The vertex attribute used to identify nodes.
edge_type_attr : string
The edge attribute to use for indicating the type of link (`interslice` or
`intraslice`).
weight_attr : string
The edge attribute used to indicate the weight.
**kwargs
Remaining keyword arguments, passed on to constructor of
``partition_type``.
Returns
-------
list of membership
list containing for each slice the membership vector.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
See Also
--------
:func:`time_slices_to_layers`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_1.vs['id'] = range(n)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> G_2.vs['id'] = range(n)
>>> membership, improvement = louvain.find_partition_temporal([G_1, G_2],
... louvain.ModularityVertexPartition,
... interslice_weight=1)
|
[
"Detect",
"communities",
"for",
"temporal",
"graphs",
"."
] |
8de2c3bad736a9deea90b80f104d8444769d331f
|
https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/src/functions.py#L138-L245
|
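find_partition_temporal decides which keyword arguments to pass to the partition constructor by checking partition_type.__init__.__code__.co_varnames. A minimal standalone sketch of that introspection pattern, with toy classes standing in for the partition types:

class WeightedPartition:
    def __init__(self, graph, weights=None):
        self.graph, self.weights = graph, weights

class PlainPartition:
    def __init__(self, graph):
        self.graph = graph

def make_partition(partition_type, graph):
    arg_dict = {'graph': graph}
    # co_varnames lists parameter names (and locals), so this is a cheap,
    # somewhat blunt capability check; inspect.signature is the precise tool.
    if 'weights' in partition_type.__init__.__code__.co_varnames:
        arg_dict['weights'] = 'weight'
    return partition_type(**arg_dict)

print(make_partition(WeightedPartition, 'G').weights)  # 'weight'
print(make_partition(PlainPartition, 'G').graph)       # 'G'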
15,265
|
vtraag/louvain-igraph
|
setup.py
|
BuildConfiguration.build_ext
|
def build_ext(self):
"""Returns a class that can be used as a replacement for the
``build_ext`` command in ``distutils`` and that will download and
compile the C core of igraph if needed."""
try:
from setuptools.command.build_ext import build_ext
except ImportError:
from distutils.command.build_ext import build_ext
buildcfg = self
class custom_build_ext(build_ext):
def run(self):
# Print a warning if pkg-config is not available or does not know about igraph
if buildcfg.use_pkgconfig:
detected = buildcfg.detect_from_pkgconfig()
else:
detected = False
# Check whether we have already compiled igraph in a previous run.
# If so, it should be found in igraphcore/include and
# igraphcore/lib
if os.path.exists("igraphcore"):
buildcfg.use_built_igraph()
detected = True
# Download and compile igraph if the user did not disable it and
# we do not know the libraries from pkg-config yet
if not detected:
if buildcfg.download_igraph_if_needed and is_unix_like():
detected = buildcfg.download_and_compile_igraph()
if detected:
buildcfg.use_built_igraph()
# Fall back to an educated guess if everything else failed
if not detected:
buildcfg.use_educated_guess()
# Replaces library names with full paths to static libraries
# where possible
if buildcfg.static_extension:
buildcfg.replace_static_libraries(exclusions=["m"])
# Prints basic build information
buildcfg.print_build_info()
ext = first(extension for extension in self.extensions
if extension.name == "louvain._c_louvain")
buildcfg.configure(ext)
# Run the original build_ext command
build_ext.run(self)
return custom_build_ext
|
python
|
def build_ext(self):
"""Returns a class that can be used as a replacement for the
``build_ext`` command in ``distutils`` and that will download and
compile the C core of igraph if needed."""
try:
from setuptools.command.build_ext import build_ext
except ImportError:
from distutils.command.build_ext import build_ext
buildcfg = self
class custom_build_ext(build_ext):
def run(self):
# Print a warning if pkg-config is not available or does not know about igraph
if buildcfg.use_pkgconfig:
detected = buildcfg.detect_from_pkgconfig()
else:
detected = False
# Check whether we have already compiled igraph in a previous run.
# If so, it should be found in igraphcore/include and
# igraphcore/lib
if os.path.exists("igraphcore"):
buildcfg.use_built_igraph()
detected = True
# Download and compile igraph if the user did not disable it and
# we do not know the libraries from pkg-config yet
if not detected:
if buildcfg.download_igraph_if_needed and is_unix_like():
detected = buildcfg.download_and_compile_igraph()
if detected:
buildcfg.use_built_igraph()
# Fall back to an educated guess if everything else failed
if not detected:
buildcfg.use_educated_guess()
# Replaces library names with full paths to static libraries
# where possible
if buildcfg.static_extension:
buildcfg.replace_static_libraries(exclusions=["m"])
# Prints basic build information
buildcfg.print_build_info()
ext = first(extension for extension in self.extensions
if extension.name == "louvain._c_louvain")
buildcfg.configure(ext)
# Run the original build_ext command
build_ext.run(self)
return custom_build_ext
|
[
"def",
"build_ext",
"(",
"self",
")",
":",
"try",
":",
"from",
"setuptools",
".",
"command",
".",
"build_ext",
"import",
"build_ext",
"except",
"ImportError",
":",
"from",
"distutils",
".",
"command",
".",
"build_ext",
"import",
"build_ext",
"buildcfg",
"=",
"self",
"class",
"custom_build_ext",
"(",
"build_ext",
")",
":",
"def",
"run",
"(",
"self",
")",
":",
"# Print a warning if pkg-config is not available or does not know about igraph",
"if",
"buildcfg",
".",
"use_pkgconfig",
":",
"detected",
"=",
"buildcfg",
".",
"detect_from_pkgconfig",
"(",
")",
"else",
":",
"detected",
"=",
"False",
"# Check whether we have already compiled igraph in a previous run.",
"# If so, it should be found in igraphcore/include and",
"# igraphcore/lib",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"\"igraphcore\"",
")",
":",
"buildcfg",
".",
"use_built_igraph",
"(",
")",
"detected",
"=",
"True",
"# Download and compile igraph if the user did not disable it and",
"# we do not know the libraries from pkg-config yet",
"if",
"not",
"detected",
":",
"if",
"buildcfg",
".",
"download_igraph_if_needed",
"and",
"is_unix_like",
"(",
")",
":",
"detected",
"=",
"buildcfg",
".",
"download_and_compile_igraph",
"(",
")",
"if",
"detected",
":",
"buildcfg",
".",
"use_built_igraph",
"(",
")",
"# Fall back to an educated guess if everything else failed",
"if",
"not",
"detected",
":",
"buildcfg",
".",
"use_educated_guess",
"(",
")",
"# Replaces library names with full paths to static libraries",
"# where possible",
"if",
"buildcfg",
".",
"static_extension",
":",
"buildcfg",
".",
"replace_static_libraries",
"(",
"exclusions",
"=",
"[",
"\"m\"",
"]",
")",
"# Prints basic build information",
"buildcfg",
".",
"print_build_info",
"(",
")",
"ext",
"=",
"first",
"(",
"extension",
"for",
"extension",
"in",
"self",
".",
"extensions",
"if",
"extension",
".",
"name",
"==",
"\"louvain._c_louvain\"",
")",
"buildcfg",
".",
"configure",
"(",
"ext",
")",
"# Run the original build_ext command",
"build_ext",
".",
"run",
"(",
"self",
")",
"return",
"custom_build_ext"
] |
Returns a class that can be used as a replacement for the
``build_ext`` command in ``distutils`` and that will download and
compile the C core of igraph if needed.
|
[
"Returns",
"a",
"class",
"that",
"can",
"be",
"used",
"as",
"a",
"replacement",
"for",
"the",
"build_ext",
"command",
"in",
"distutils",
"and",
"that",
"will",
"download",
"and",
"compile",
"the",
"C",
"core",
"of",
"igraph",
"if",
"needed",
"."
] |
8de2c3bad736a9deea90b80f104d8444769d331f
|
https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/setup.py#L353-L405
|
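The build_ext record returns a command class generated at call time, capturing the build configuration through a closure rather than through globals. A stripped-down sketch of the pattern; the hook body here is illustrative only:

def make_build_ext(buildcfg):
    try:
        from setuptools.command.build_ext import build_ext
    except ImportError:
        from distutils.command.build_ext import build_ext

    class custom_build_ext(build_ext):
        def run(self):
            # Hypothetical hook: consult the captured configuration before
            # delegating to the stock build_ext implementation.
            print('building with config:', buildcfg)
            build_ext.run(self)

    return custom_build_ext

# Inside setup.py:
#   setup(..., cmdclass={'build_ext': make_build_ext(cfg)})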
15,266
|
vtraag/louvain-igraph
|
src/VertexPartition.py
|
CPMVertexPartition.Bipartite
|
def Bipartite(graph, resolution_parameter_01,
resolution_parameter_0 = 0, resolution_parameter_1 = 0,
degree_as_node_size=False, types='type', **kwargs):
""" Create three layers for bipartite partitions.
This creates three layers for bipartite partition necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
    Resolution parameter for links between the two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. If not 0, 1, it is automatically
converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers: (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
    Although the derivation above uses :math:`n_c^2`, implicitly assuming a
    directed graph with self-loops, similar derivations can be made for
    undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
    somewhat more convoluted.
    If we set node sizes equal to the degree, we get something similar to
    modularity, except that the resolution parameter should still be divided by
    :math:`2m`. In particular, in general (i.e. not specifically for bipartite
    graphs) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes.
"""
if types is not None:
if isinstance(types, str):
types = graph.vs[types]
else:
# Make sure it is a list
types = list(types)
if set(types) != set([0, 1]):
new_type = _ig.UniqueIdGenerator()
types = [new_type[t] for t in types]
if set(types) != set([0, 1]):
raise ValueError("More than one type specified.")
if degree_as_node_size:
if (graph.is_directed()):
raise ValueError("This method is not suitable for directed graphs " +
"when using degree as node sizes.")
node_sizes = graph.degree()
else:
node_sizes = [1]*graph.vcount()
partition_01 = CPMVertexPartition(graph,
node_sizes=node_sizes,
resolution_parameter=resolution_parameter_01,
**kwargs)
H_0 = graph.subgraph_edges([], delete_vertices=False)
partition_0 = CPMVertexPartition(H_0, weights=None,
node_sizes=[s if t == 0 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_0,
**kwargs)
H_1 = graph.subgraph_edges([], delete_vertices=False)
partition_1 = CPMVertexPartition(H_1, weights=None,
node_sizes=[s if t == 1 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_1,
**kwargs)
return partition_01, partition_0, partition_1
|
python
|
def Bipartite(graph, resolution_parameter_01,
resolution_parameter_0 = 0, resolution_parameter_1 = 0,
degree_as_node_size=False, types='type', **kwargs):
""" Create three layers for bipartite partitions.
This creates three layers for bipartite partition necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
    Resolution parameter for links between the two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. If not 0, 1, it is automatically
converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers: (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
    Although the derivation above uses :math:`n_c^2`, implicitly assuming a
    directed graph with self-loops, similar derivations can be made for
    undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
    somewhat more convoluted.
    If we set node sizes equal to the degree, we get something similar to
    modularity, except that the resolution parameter should still be divided by
    :math:`2m`. In particular, in general (i.e. not specifically for bipartite
    graphs) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes.
"""
if types is not None:
if isinstance(types, str):
types = graph.vs[types]
else:
# Make sure it is a list
types = list(types)
if set(types) != set([0, 1]):
new_type = _ig.UniqueIdGenerator()
types = [new_type[t] for t in types]
if set(types) != set([0, 1]):
raise ValueError("More than one type specified.")
if degree_as_node_size:
if (graph.is_directed()):
raise ValueError("This method is not suitable for directed graphs " +
"when using degree as node sizes.")
node_sizes = graph.degree()
else:
node_sizes = [1]*graph.vcount()
partition_01 = CPMVertexPartition(graph,
node_sizes=node_sizes,
resolution_parameter=resolution_parameter_01,
**kwargs)
H_0 = graph.subgraph_edges([], delete_vertices=False)
partition_0 = CPMVertexPartition(H_0, weights=None,
node_sizes=[s if t == 0 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_0,
**kwargs)
H_1 = graph.subgraph_edges([], delete_vertices=False)
partition_1 = CPMVertexPartition(H_1, weights=None,
node_sizes=[s if t == 1 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_1,
**kwargs)
return partition_01, partition_0, partition_1
|
[
"def",
"Bipartite",
"(",
"graph",
",",
"resolution_parameter_01",
",",
"resolution_parameter_0",
"=",
"0",
",",
"resolution_parameter_1",
"=",
"0",
",",
"degree_as_node_size",
"=",
"False",
",",
"types",
"=",
"'type'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"types",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"types",
",",
"str",
")",
":",
"types",
"=",
"graph",
".",
"vs",
"[",
"types",
"]",
"else",
":",
"# Make sure it is a list",
"types",
"=",
"list",
"(",
"types",
")",
"if",
"set",
"(",
"types",
")",
"!=",
"set",
"(",
"[",
"0",
",",
"1",
"]",
")",
":",
"new_type",
"=",
"_ig",
".",
"UniqueIdGenerator",
"(",
")",
"types",
"=",
"[",
"new_type",
"[",
"t",
"]",
"for",
"t",
"in",
"types",
"]",
"if",
"set",
"(",
"types",
")",
"!=",
"set",
"(",
"[",
"0",
",",
"1",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"More than one type specified.\"",
")",
"if",
"degree_as_node_size",
":",
"if",
"(",
"graph",
".",
"is_directed",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"This method is not suitable for directed graphs \"",
"+",
"\"when using degree as node sizes.\"",
")",
"node_sizes",
"=",
"graph",
".",
"degree",
"(",
")",
"else",
":",
"node_sizes",
"=",
"[",
"1",
"]",
"*",
"graph",
".",
"vcount",
"(",
")",
"partition_01",
"=",
"CPMVertexPartition",
"(",
"graph",
",",
"node_sizes",
"=",
"node_sizes",
",",
"resolution_parameter",
"=",
"resolution_parameter_01",
",",
"*",
"*",
"kwargs",
")",
"H_0",
"=",
"graph",
".",
"subgraph_edges",
"(",
"[",
"]",
",",
"delete_vertices",
"=",
"False",
")",
"partition_0",
"=",
"CPMVertexPartition",
"(",
"H_0",
",",
"weights",
"=",
"None",
",",
"node_sizes",
"=",
"[",
"s",
"if",
"t",
"==",
"0",
"else",
"0",
"for",
"v",
",",
"s",
",",
"t",
"in",
"zip",
"(",
"graph",
".",
"vs",
",",
"node_sizes",
",",
"types",
")",
"]",
",",
"resolution_parameter",
"=",
"resolution_parameter_01",
"-",
"resolution_parameter_0",
",",
"*",
"*",
"kwargs",
")",
"H_1",
"=",
"graph",
".",
"subgraph_edges",
"(",
"[",
"]",
",",
"delete_vertices",
"=",
"False",
")",
"partition_1",
"=",
"CPMVertexPartition",
"(",
"H_1",
",",
"weights",
"=",
"None",
",",
"node_sizes",
"=",
"[",
"s",
"if",
"t",
"==",
"1",
"else",
"0",
"for",
"v",
",",
"s",
",",
"t",
"in",
"zip",
"(",
"graph",
".",
"vs",
",",
"node_sizes",
",",
"types",
")",
"]",
",",
"resolution_parameter",
"=",
"resolution_parameter_01",
"-",
"resolution_parameter_1",
",",
"*",
"*",
"kwargs",
")",
"return",
"partition_01",
",",
"partition_0",
",",
"partition_1"
] |
Create three layers for bipartite partitions.
This creates three layers for bipartite partition necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
Resolution parameter for links between the two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. If not 0, 1, it is automatically
converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers: (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
Although the derivation above uses :math:`n_c^2`, implicitly assuming a
directed graph with self-loops, similar derivations can be made for
undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
somewhat more convoluted.
If we set node sizes equal to the degree, we get something similar to
modularity, except that the resolution parameter should still be divided by
:math:`2m`. In particular, in general (i.e. not specifically for bipartite
graphs) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes.
|
[
"Create",
"three",
"layers",
"for",
"bipartite",
"partitions",
"."
] |
8de2c3bad736a9deea90b80f104d8444769d331f
|
https://github.com/vtraag/louvain-igraph/blob/8de2c3bad736a9deea90b80f104d8444769d331f/src/VertexPartition.py#L865-L1009
|
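Per the docstring, the three partitions returned by Bipartite are meant to be optimised jointly with layer weights [1, -1, -1]. A hedged usage sketch; the graph construction is illustrative and assumes the igraph and louvain packages:

import igraph as ig
import louvain

# Small bipartite graph; the 'type' vertex attribute marks the two classes.
G = ig.Graph.Full_Bipartite(5, 5)
G.vs['type'] = [0] * 5 + [1] * 5

p_01, p_0, p_1 = louvain.CPMVertexPartition.Bipartite(
    G, resolution_parameter_01=0.1)
optimiser = louvain.Optimiser()
optimiser.optimise_partition_multiplex([p_01, p_0, p_1], [1, -1, -1])
print(p_01.membership)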
15,267
|
vinta/pangu.py
|
pangu.py
|
spacing_file
|
def spacing_file(path):
"""
Perform paranoid text spacing from file.
"""
# TODO: read line by line
with open(os.path.abspath(path)) as f:
return spacing_text(f.read())
|
python
|
def spacing_file(path):
"""
Perform paranoid text spacing from file.
"""
# TODO: read line by line
with open(os.path.abspath(path)) as f:
return spacing_text(f.read())
|
[
"def",
"spacing_file",
"(",
"path",
")",
":",
"# TODO: read line by line",
"with",
"open",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"as",
"f",
":",
"return",
"spacing_text",
"(",
"f",
".",
"read",
"(",
")",
")"
] |
Perform paranoid text spacing from file.
|
[
"Perform",
"paranoid",
"text",
"spacing",
"from",
"file",
"."
] |
89407cf08dedf9d895c13053dd518d11a20f6c95
|
https://github.com/vinta/pangu.py/blob/89407cf08dedf9d895c13053dd518d11a20f6c95/pangu.py#L156-L162
|
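The TODO in spacing_file flags that it should read line by line rather than slurping the whole file. A sketch of that variant, assuming spacing_text can be applied per line and leaves newlines intact:

import os
from pangu import spacing_text  # assumed import; spacing_file lives in pangu.py

def spacing_file_by_line(path):
    # Hypothetical line-by-line variant: constant memory for large files.
    with open(os.path.abspath(path)) as f:
        return ''.join(spacing_text(line) for line in f)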
15,268
|
EventRegistry/event-registry-python
|
eventregistry/EventForText.py
|
GetEventForText.compute
|
def compute(self,
text, # text for which to find the most similar event
lang = "eng"): # language in which the text is written
"""
compute the list of most similar events for the given text
"""
params = { "lang": lang, "text": text, "topClustersCount": self._nrOfEventsToReturn }
res = self._er.jsonRequest("/json/getEventForText/enqueueRequest", params)
requestId = res["requestId"]
for i in range(10):
time.sleep(1) # sleep for 1 second to wait for the clustering to perform computation
res = self._er.jsonRequest("/json/getEventForText/testRequest", { "requestId": requestId })
if isinstance(res, list) and len(res) > 0:
return res
return None
|
python
|
def compute(self,
text, # text for which to find the most similar event
lang = "eng"): # language in which the text is written
"""
compute the list of most similar events for the given text
"""
params = { "lang": lang, "text": text, "topClustersCount": self._nrOfEventsToReturn }
res = self._er.jsonRequest("/json/getEventForText/enqueueRequest", params)
requestId = res["requestId"]
for i in range(10):
time.sleep(1) # sleep for 1 second to wait for the clustering to perform computation
res = self._er.jsonRequest("/json/getEventForText/testRequest", { "requestId": requestId })
if isinstance(res, list) and len(res) > 0:
return res
return None
|
[
"def",
"compute",
"(",
"self",
",",
"text",
",",
"# text for which to find the most similar event",
"lang",
"=",
"\"eng\"",
")",
":",
"# language in which the text is written",
"params",
"=",
"{",
"\"lang\"",
":",
"lang",
",",
"\"text\"",
":",
"text",
",",
"\"topClustersCount\"",
":",
"self",
".",
"_nrOfEventsToReturn",
"}",
"res",
"=",
"self",
".",
"_er",
".",
"jsonRequest",
"(",
"\"/json/getEventForText/enqueueRequest\"",
",",
"params",
")",
"requestId",
"=",
"res",
"[",
"\"requestId\"",
"]",
"for",
"i",
"in",
"range",
"(",
"10",
")",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"# sleep for 1 second to wait for the clustering to perform computation",
"res",
"=",
"self",
".",
"_er",
".",
"jsonRequest",
"(",
"\"/json/getEventForText/testRequest\"",
",",
"{",
"\"requestId\"",
":",
"requestId",
"}",
")",
"if",
"isinstance",
"(",
"res",
",",
"list",
")",
"and",
"len",
"(",
"res",
")",
">",
"0",
":",
"return",
"res",
"return",
"None"
] |
compute the list of most similar events for the given text
|
[
"compute",
"the",
"list",
"of",
"most",
"similar",
"events",
"for",
"the",
"given",
"text"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/EventForText.py#L42-L57
|
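compute above follows an enqueue-then-poll protocol: submit the text once, then poll the result endpoint up to ten times at one-second intervals. A generic standalone sketch of that loop:

import time

def poll_until_ready(submit, check, attempts=10, delay=1.0):
    """Enqueue-and-poll helper mirroring the loop above.

    submit() returns a request id; check(request_id) returns a non-empty
    list once the result is ready, or something falsy while pending.
    """
    request_id = submit()
    for _ in range(attempts):
        time.sleep(delay)  # give the backend time to finish clustering
        result = check(request_id)
        if isinstance(result, list) and len(result) > 0:
            return result
    return None  # timed out, matching compute() above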
15,269
|
EventRegistry/event-registry-python
|
eventregistry/Analytics.py
|
Analytics.annotate
|
def annotate(self, text, lang = None, customParams = None):
"""
identify the list of entities and nonentities mentioned in the text
@param text: input text to annotate
@param lang: language of the provided document (can be an ISO2 or ISO3 code). If None is provided, the language will be automatically detected
@param customParams: None or a dict with custom parameters to send to the annotation service
@returns: dict
"""
params = {"lang": lang, "text": text}
if customParams:
params.update(customParams)
return self._er.jsonRequestAnalytics("/api/v1/annotate", params)
|
python
|
def annotate(self, text, lang = None, customParams = None):
"""
identify the list of entities and nonentities mentioned in the text
@param text: input text to annotate
@param lang: language of the provided document (can be an ISO2 or ISO3 code). If None is provided, the language will be automatically detected
@param customParams: None or a dict with custom parameters to send to the annotation service
@returns: dict
"""
params = {"lang": lang, "text": text}
if customParams:
params.update(customParams)
return self._er.jsonRequestAnalytics("/api/v1/annotate", params)
|
[
"def",
"annotate",
"(",
"self",
",",
"text",
",",
"lang",
"=",
"None",
",",
"customParams",
"=",
"None",
")",
":",
"params",
"=",
"{",
"\"lang\"",
":",
"lang",
",",
"\"text\"",
":",
"text",
"}",
"if",
"customParams",
":",
"params",
".",
"update",
"(",
"customParams",
")",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/annotate\"",
",",
"params",
")"
] |
identify the list of entities and nonentities mentioned in the text
@param text: input text to annotate
@param lang: language of the provided document (can be an ISO2 or ISO3 code). If None is provided, the language will be automatically detected
@param customParams: None or a dict with custom parameters to send to the annotation service
@returns: dict
|
[
"identify",
"the",
"list",
"of",
"entities",
"and",
"nonentities",
"mentioned",
"in",
"the",
"text"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L25-L36
|
15,270
|
EventRegistry/event-registry-python
|
eventregistry/Analytics.py
|
Analytics.sentiment
|
def sentiment(self, text, method = "vocabulary"):
"""
determine the sentiment of the provided text in English language
@param text: input text to categorize
@param method: method to use to compute the sentiment. possible values are "vocabulary" (vocabulary based sentiment analysis)
and "rnn" (neural network based sentiment classification)
@returns: dict
"""
assert method == "vocabulary" or method == "rnn"
endpoint = method == "vocabulary" and "sentiment" or "sentimentRNN"
return self._er.jsonRequestAnalytics("/api/v1/" + endpoint, { "text": text })
|
python
|
def sentiment(self, text, method = "vocabulary"):
"""
determine the sentiment of the provided text in English language
        @param text: input text to analyze
@param method: method to use to compute the sentiment. possible values are "vocabulary" (vocabulary based sentiment analysis)
and "rnn" (neural network based sentiment classification)
@returns: dict
"""
assert method == "vocabulary" or method == "rnn"
endpoint = method == "vocabulary" and "sentiment" or "sentimentRNN"
return self._er.jsonRequestAnalytics("/api/v1/" + endpoint, { "text": text })
|
[
"def",
"sentiment",
"(",
"self",
",",
"text",
",",
"method",
"=",
"\"vocabulary\"",
")",
":",
"assert",
"method",
"==",
"\"vocabulary\"",
"or",
"method",
"==",
"\"rnn\"",
"endpoint",
"=",
"method",
"==",
"\"vocabulary\"",
"and",
"\"sentiment\"",
"or",
"\"sentimentRNN\"",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/\"",
"+",
"endpoint",
",",
"{",
"\"text\"",
":",
"text",
"}",
")"
] |
determine the sentiment of the provided text in English language
        @param text: input text to analyze
@param method: method to use to compute the sentiment. possible values are "vocabulary" (vocabulary based sentiment analysis)
and "rnn" (neural network based sentiment classification)
@returns: dict
|
[
"determine",
"the",
"sentiment",
"of",
"the",
"provided",
"text",
"in",
"English",
"language"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L50-L60
|
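A short sketch for Analytics.sentiment, reusing the assumed analytics client from the annotate sketch above; the docstring limits input to English text:

# vocabulary-based scoring (the default)
print(analytics.sentiment("The launch went remarkably well."))
# neural-network-based classification
print(analytics.sentiment("The launch went remarkably well.", method="rnn"))
# any other method value trips the assert in the body above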
15,271
|
EventRegistry/event-registry-python
|
eventregistry/Analytics.py
|
Analytics.semanticSimilarity
|
def semanticSimilarity(self, text1, text2, distanceMeasure = "cosine"):
"""
determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict
"""
return self._er.jsonRequestAnalytics("/api/v1/semanticSimilarity", { "text1": text1, "text2": text2, "distanceMeasure": distanceMeasure })
|
python
|
def semanticSimilarity(self, text1, text2, distanceMeasure = "cosine"):
"""
determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict
"""
return self._er.jsonRequestAnalytics("/api/v1/semanticSimilarity", { "text1": text1, "text2": text2, "distanceMeasure": distanceMeasure })
|
[
"def",
"semanticSimilarity",
"(",
"self",
",",
"text1",
",",
"text2",
",",
"distanceMeasure",
"=",
"\"cosine\"",
")",
":",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/semanticSimilarity\"",
",",
"{",
"\"text1\"",
":",
"text1",
",",
"\"text2\"",
":",
"text2",
",",
"\"distanceMeasure\"",
":",
"distanceMeasure",
"}",
")"
] |
determine the semantic similarity of the two provided documents
@param text1: first document to analyze
@param text2: second document to analyze
@param distanceMeasure: distance measure to use for comparing two documents. Possible values are "cosine" (default) or "jaccard"
@returns: dict
|
[
"determine",
"the",
"semantic",
"similarity",
"of",
"the",
"two",
"provided",
"documents"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L63-L71
|
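A sketch for Analytics.semanticSimilarity under the same assumed client; "jaccard" is the documented alternative to the default "cosine" measure:

docA = "Solar panel installations grew sharply last year."
docB = "Photovoltaic deployments increased significantly in 2023."
print(analytics.semanticSimilarity(docA, docB))                             # cosine
print(analytics.semanticSimilarity(docA, docB, distanceMeasure="jaccard"))  # jaccard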
15,272
|
EventRegistry/event-registry-python
|
eventregistry/Analytics.py
|
Analytics.extractArticleInfo
|
def extractArticleInfo(self, url, proxyUrl = None, headers = None, cookies = None):
"""
extract all available information about an article available at url `url`. Returned information will include
article title, body, authors, links in the articles, ...
@param url: article url to extract article information from
@param proxyUrl: proxy that should be used for downloading article information. format: {schema}://{username}:{pass}@{proxy url/ip}
@param headers: dict with headers to set in the request (optional)
@param cookies: dict with cookies to set in the request (optional)
@returns: dict
"""
params = { "url": url }
if proxyUrl:
params["proxyUrl"] = proxyUrl
if headers:
if isinstance(headers, dict):
headers = json.dumps(headers)
params["headers"] = headers
if cookies:
if isinstance(cookies, dict):
cookies = json.dumps(cookies)
params["cookies"] = cookies
return self._er.jsonRequestAnalytics("/api/v1/extractArticleInfo", params)
|
python
|
def extractArticleInfo(self, url, proxyUrl = None, headers = None, cookies = None):
"""
extract all available information about an article available at url `url`. Returned information will include
article title, body, authors, links in the articles, ...
@param url: article url to extract article information from
@param proxyUrl: proxy that should be used for downloading article information. format: {schema}://{username}:{pass}@{proxy url/ip}
@param headers: dict with headers to set in the request (optional)
@param cookies: dict with cookies to set in the request (optional)
@returns: dict
"""
params = { "url": url }
if proxyUrl:
params["proxyUrl"] = proxyUrl
if headers:
if isinstance(headers, dict):
headers = json.dumps(headers)
params["headers"] = headers
if cookies:
if isinstance(cookies, dict):
cookies = json.dumps(cookies)
params["cookies"] = cookies
return self._er.jsonRequestAnalytics("/api/v1/extractArticleInfo", params)
|
[
"def",
"extractArticleInfo",
"(",
"self",
",",
"url",
",",
"proxyUrl",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"cookies",
"=",
"None",
")",
":",
"params",
"=",
"{",
"\"url\"",
":",
"url",
"}",
"if",
"proxyUrl",
":",
"params",
"[",
"\"proxyUrl\"",
"]",
"=",
"proxyUrl",
"if",
"headers",
":",
"if",
"isinstance",
"(",
"headers",
",",
"dict",
")",
":",
"headers",
"=",
"json",
".",
"dumps",
"(",
"headers",
")",
"params",
"[",
"\"headers\"",
"]",
"=",
"headers",
"if",
"cookies",
":",
"if",
"isinstance",
"(",
"cookies",
",",
"dict",
")",
":",
"cookies",
"=",
"json",
".",
"dumps",
"(",
"cookies",
")",
"params",
"[",
"\"cookies\"",
"]",
"=",
"cookies",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/extractArticleInfo\"",
",",
"params",
")"
] |
extract all available information about an article available at url `url`. Returned information will include
article title, body, authors, links in the articles, ...
@param url: article url to extract article information from
@param proxyUrl: proxy that should be used for downloading article information. format: {schema}://{username}:{pass}@{proxy url/ip}
@param headers: dict with headers to set in the request (optional)
@param cookies: dict with cookies to set in the request (optional)
@returns: dict
|
[
"extract",
"all",
"available",
"information",
"about",
"an",
"article",
"available",
"at",
"url",
"url",
".",
"Returned",
"information",
"will",
"include",
"article",
"title",
"body",
"authors",
"links",
"in",
"the",
"articles",
"..."
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L83-L104
|
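A sketch for Analytics.extractArticleInfo; the URL and header/cookie values are placeholders, and the "title"/"body" result keys are assumed from the docstring. Dict-valued headers and cookies are serialized with json.dumps inside the method, as the body above shows:

info = analytics.extractArticleInfo(
    "https://example.com/some-article",        # placeholder URL
    headers={"User-Agent": "Mozilla/5.0"},     # dict is json.dumps-ed internally
    cookies={"consent": "true"})
print(info.get("title"), info.get("body", "")[:200])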
15,273
|
EventRegistry/event-registry-python
|
eventregistry/Analytics.py
|
Analytics.trainTopicOnTweets
|
def trainTopicOnTweets(self, twitterQuery, useTweetText=True, useIdfNormalization=True,
normalization="linear", maxTweets=2000, maxUsedLinks=500, ignoreConceptTypes=[],
maxConcepts = 20, maxCategories = 10, notifyEmailAddress = None):
"""
create a new topic and train it using the tweets that match the twitterQuery
@param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url),
a hash tag (using "#" prefix) or a regular keyword.
@param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared
in the articles in the user's tweets will be analyzed
@param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts)
@param normalization: way to normalize the concept weights ("none", "linear")
@param maxTweets: maximum number of tweets to collect (default 2000, max 5000)
@param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000)
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param maxConcepts: the number of concepts to save in the final topic
@param maxCategories: the number of categories to save in the final topic
@param notifyEmailAddress: when finished, should we send a notification email to this address?
"""
        assert maxTweets <= 5000, "we can analyze at most 5000 tweets"
params = {"twitterQuery": twitterQuery, "useTweetText": useTweetText,
"useIdfNormalization": useIdfNormalization, "normalization": normalization,
"maxTweets": maxTweets, "maxUsedLinks": maxUsedLinks,
"maxConcepts": maxConcepts, "maxCategories": maxCategories }
if notifyEmailAddress:
params["notifyEmailAddress"] = notifyEmailAddress
if len(ignoreConceptTypes) > 0:
params["ignoreConceptTypes"] = ignoreConceptTypes
return self._er.jsonRequestAnalytics("/api/v1/trainTopicOnTwitter", params)
|
python
|
def trainTopicOnTweets(self, twitterQuery, useTweetText=True, useIdfNormalization=True,
normalization="linear", maxTweets=2000, maxUsedLinks=500, ignoreConceptTypes=[],
maxConcepts = 20, maxCategories = 10, notifyEmailAddress = None):
"""
create a new topic and train it using the tweets that match the twitterQuery
@param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url),
a hash tag (using "#" prefix) or a regular keyword.
@param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared
in the articles in the user's tweets will be analyzed
@param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts)
@param normalization: way to normalize the concept weights ("none", "linear")
@param maxTweets: maximum number of tweets to collect (default 2000, max 5000)
@param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000)
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param maxConcepts: the number of concepts to save in the final topic
@param maxCategories: the number of categories to save in the final topic
@param notifyEmailAddress: when finished, should we send a notification email to this address?
"""
        assert maxTweets <= 5000, "we can analyze at most 5000 tweets"
params = {"twitterQuery": twitterQuery, "useTweetText": useTweetText,
"useIdfNormalization": useIdfNormalization, "normalization": normalization,
"maxTweets": maxTweets, "maxUsedLinks": maxUsedLinks,
"maxConcepts": maxConcepts, "maxCategories": maxCategories }
if notifyEmailAddress:
params["notifyEmailAddress"] = notifyEmailAddress
if len(ignoreConceptTypes) > 0:
params["ignoreConceptTypes"] = ignoreConceptTypes
return self._er.jsonRequestAnalytics("/api/v1/trainTopicOnTwitter", params)
|
[
"def",
"trainTopicOnTweets",
"(",
"self",
",",
"twitterQuery",
",",
"useTweetText",
"=",
"True",
",",
"useIdfNormalization",
"=",
"True",
",",
"normalization",
"=",
"\"linear\"",
",",
"maxTweets",
"=",
"2000",
",",
"maxUsedLinks",
"=",
"500",
",",
"ignoreConceptTypes",
"=",
"[",
"]",
",",
"maxConcepts",
"=",
"20",
",",
"maxCategories",
"=",
"10",
",",
"notifyEmailAddress",
"=",
"None",
")",
":",
"assert",
"maxTweets",
"<",
"5000",
",",
"\"we can analyze at most 5000 tweets\"",
"params",
"=",
"{",
"\"twitterQuery\"",
":",
"twitterQuery",
",",
"\"useTweetText\"",
":",
"useTweetText",
",",
"\"useIdfNormalization\"",
":",
"useIdfNormalization",
",",
"\"normalization\"",
":",
"normalization",
",",
"\"maxTweets\"",
":",
"maxTweets",
",",
"\"maxUsedLinks\"",
":",
"maxUsedLinks",
",",
"\"maxConcepts\"",
":",
"maxConcepts",
",",
"\"maxCategories\"",
":",
"maxCategories",
"}",
"if",
"notifyEmailAddress",
":",
"params",
"[",
"\"notifyEmailAddress\"",
"]",
"=",
"notifyEmailAddress",
"if",
"len",
"(",
"ignoreConceptTypes",
")",
">",
"0",
":",
"params",
"[",
"\"ignoreConceptTypes\"",
"]",
"=",
"ignoreConceptTypes",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/trainTopicOnTwitter\"",
",",
"params",
")"
] |
create a new topic and train it using the tweets that match the twitterQuery
@param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url),
a hash tag (using "#" prefix) or a regular keyword.
@param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared
in the articles in the user's tweets will be analyzed
@param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts)
@param normalization: way to normalize the concept weights ("none", "linear")
@param maxTweets: maximum number of tweets to collect (default 2000, max 5000)
@param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000)
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param maxConcepts: the number of concepts to save in the final topic
@param maxCategories: the number of categories to save in the final topic
@param notifyEmailAddress: when finished, should we send a notification email to this address?
|
[
"create",
"a",
"new",
"topic",
"and",
"train",
"it",
"using",
"the",
"tweets",
"that",
"match",
"the",
"twitterQuery"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L116-L144
|
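A sketch for Analytics.trainTopicOnTweets; the query and email address are placeholders, and maxTweets must respect the assert in the body above:

res = analytics.trainTopicOnTweets(
    "#renewableenergy",                    # hashtag query, per the docstring
    maxTweets=3000,                        # within the 5000-tweet limit
    ignoreConceptTypes=["person"],         # skip person concepts in the profile
    notifyEmailAddress="me@example.com")   # placeholder address
print(res)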
15,274
|
EventRegistry/event-registry-python
|
eventregistry/Analytics.py
|
Analytics.trainTopicGetTrainedTopic
|
def trainTopicGetTrainedTopic(self, uri, maxConcepts = 20, maxCategories = 10,
ignoreConceptTypes=[], idfNormalization = True):
"""
        retrieve the topic for which you have already finished training
@param uri: uri of the topic (obtained by calling trainTopicCreateTopic method)
@param maxConcepts: number of top concepts to retrieve in the topic
@param maxCategories: number of top categories to retrieve in the topic
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts
        @returns: the trained topic: { concepts: [], categories: [] }
"""
return self._er.jsonRequestAnalytics("/api/v1/trainTopic", { "action": "getTrainedTopic", "uri": uri, "maxConcepts": maxConcepts, "maxCategories": maxCategories, "idfNormalization": idfNormalization })
|
python
|
def trainTopicGetTrainedTopic(self, uri, maxConcepts = 20, maxCategories = 10,
ignoreConceptTypes=[], idfNormalization = True):
"""
        retrieve the topic for which you have already finished training
@param uri: uri of the topic (obtained by calling trainTopicCreateTopic method)
@param maxConcepts: number of top concepts to retrieve in the topic
@param maxCategories: number of top categories to retrieve in the topic
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts
        @returns: the trained topic: { concepts: [], categories: [] }
"""
return self._er.jsonRequestAnalytics("/api/v1/trainTopic", { "action": "getTrainedTopic", "uri": uri, "maxConcepts": maxConcepts, "maxCategories": maxCategories, "idfNormalization": idfNormalization })
|
[
"def",
"trainTopicGetTrainedTopic",
"(",
"self",
",",
"uri",
",",
"maxConcepts",
"=",
"20",
",",
"maxCategories",
"=",
"10",
",",
"ignoreConceptTypes",
"=",
"[",
"]",
",",
"idfNormalization",
"=",
"True",
")",
":",
"return",
"self",
".",
"_er",
".",
"jsonRequestAnalytics",
"(",
"\"/api/v1/trainTopic\"",
",",
"{",
"\"action\"",
":",
"\"getTrainedTopic\"",
",",
"\"uri\"",
":",
"uri",
",",
"\"maxConcepts\"",
":",
"maxConcepts",
",",
"\"maxCategories\"",
":",
"maxCategories",
",",
"\"idfNormalization\"",
":",
"idfNormalization",
"}",
")"
] |
retrieve the topic for which you have already finished training
@param uri: uri of the topic (obtained by calling trainTopicCreateTopic method)
@param maxConcepts: number of top concepts to retrieve in the topic
@param maxCategories: number of top categories to retrieve in the topic
@param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those
@param idfNormalization: should the concepts be normalized by punishing the commonly mentioned concepts
        @returns: the trained topic: { concepts: [], categories: [] }
|
[
"retrieve",
"topic",
"for",
"the",
"topic",
"for",
"which",
"you",
"have",
"already",
"finished",
"training"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/Analytics.py#L172-L183
|
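A sketch for Analytics.trainTopicGetTrainedTopic; the uri placeholder stands for a value obtained from the trainTopicCreateTopic call that the docstring references:

topic = analytics.trainTopicGetTrainedTopic(
    "topic-uri-placeholder",               # uri returned by trainTopicCreateTopic
    maxConcepts=30, maxCategories=5)
print(topic.get("concepts", []))
print(topic.get("categories", []))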
15,275
|
EventRegistry/event-registry-python
|
eventregistry/examples/TopicPagesExamples.py
|
createTopicPage1
|
def createTopicPage1():
"""
create a topic page directly
"""
topic = TopicPage(er)
topic.addKeyword("renewable energy", 30)
topic.addConcept(er.getConceptUri("biofuel"), 50)
topic.addConcept(er.getConceptUri("solar energy"), 50)
topic.addCategory(er.getCategoryUri("renewable"), 50)
# skip articles that are duplicates of other articles
topic.articleHasDuplicateFilter("skipHasDuplicates")
# return only articles that are about some event that we have detected
topic.articleHasEventFilter("skipArticlesWithoutEvent")
# get first 2 pages of articles sorted by relevance to the topic page
arts1 = topic.getArticles(page=1, sortBy="rel")
arts2 = topic.getArticles(page=2, sortBy="rel")
# get first page of events
events1 = topic.getEvents(page=1)
|
python
|
def createTopicPage1():
"""
create a topic page directly
"""
topic = TopicPage(er)
topic.addKeyword("renewable energy", 30)
topic.addConcept(er.getConceptUri("biofuel"), 50)
topic.addConcept(er.getConceptUri("solar energy"), 50)
topic.addCategory(er.getCategoryUri("renewable"), 50)
# skip articles that are duplicates of other articles
topic.articleHasDuplicateFilter("skipHasDuplicates")
# return only articles that are about some event that we have detected
topic.articleHasEventFilter("skipArticlesWithoutEvent")
# get first 2 pages of articles sorted by relevance to the topic page
arts1 = topic.getArticles(page=1, sortBy="rel")
arts2 = topic.getArticles(page=2, sortBy="rel")
# get first page of events
events1 = topic.getEvents(page=1)
|
[
"def",
"createTopicPage1",
"(",
")",
":",
"topic",
"=",
"TopicPage",
"(",
"er",
")",
"topic",
".",
"addKeyword",
"(",
"\"renewable energy\"",
",",
"30",
")",
"topic",
".",
"addConcept",
"(",
"er",
".",
"getConceptUri",
"(",
"\"biofuel\"",
")",
",",
"50",
")",
"topic",
".",
"addConcept",
"(",
"er",
".",
"getConceptUri",
"(",
"\"solar energy\"",
")",
",",
"50",
")",
"topic",
".",
"addCategory",
"(",
"er",
".",
"getCategoryUri",
"(",
"\"renewable\"",
")",
",",
"50",
")",
"# skip articles that are duplicates of other articles",
"topic",
".",
"articleHasDuplicateFilter",
"(",
"\"skipHasDuplicates\"",
")",
"# return only articles that are about some event that we have detected",
"topic",
".",
"articleHasEventFilter",
"(",
"\"skipArticlesWithoutEvent\"",
")",
"# get first 2 pages of articles sorted by relevance to the topic page",
"arts1",
"=",
"topic",
".",
"getArticles",
"(",
"page",
"=",
"1",
",",
"sortBy",
"=",
"\"rel\"",
")",
"arts2",
"=",
"topic",
".",
"getArticles",
"(",
"page",
"=",
"2",
",",
"sortBy",
"=",
"\"rel\"",
")",
"# get first page of events",
"events1",
"=",
"topic",
".",
"getEvents",
"(",
"page",
"=",
"1",
")"
] |
create a topic page directly
|
[
"create",
"a",
"topic",
"page",
"directly"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/examples/TopicPagesExamples.py#L6-L26
|
15,276
|
EventRegistry/event-registry-python
|
eventregistry/examples/TopicPagesExamples.py
|
createTopicPage2
|
def createTopicPage2():
"""
create a topic page directly, set the article threshold, restrict results to set concepts and keywords
"""
topic = TopicPage(er)
topic.addCategory(er.getCategoryUri("renewable"), 50)
topic.addKeyword("renewable energy", 30)
topic.addConcept(er.getConceptUri("biofuel"), 50)
topic.addConcept(er.getConceptUri("solar energy"), 50)
# require that the results will mention at least one of the concepts and keywords specified
# (even though they might have the category about renewable energy, that will not be enough
# for an article to be among the results)
topic.restrictToSetConceptsAndKeywords(True)
# limit results to English, German and Spanish results
topic.setLanguages(["eng", "deu", "spa"])
# get results that are at most 3 days old
topic.setMaxDaysBack(3)
# require that the articles that will be returned should get at least a total score of 30 points or more
# based on the specified list of conditions
topic.setArticleThreshold(30)
# get first page of articles sorted by date (from most recent backward) to the topic page
arts1 = topic.getArticles(page=1,
sortBy="date",
returnInfo=ReturnInfo(
articleInfo = ArticleInfoFlags(concepts=True, categories=True)
))
for art in arts1.get("articles", {}).get("results", []):
print(art)
|
python
|
def createTopicPage2():
"""
create a topic page directly, set the article threshold, restrict results to set concepts and keywords
"""
topic = TopicPage(er)
topic.addCategory(er.getCategoryUri("renewable"), 50)
topic.addKeyword("renewable energy", 30)
topic.addConcept(er.getConceptUri("biofuel"), 50)
topic.addConcept(er.getConceptUri("solar energy"), 50)
# require that the results will mention at least one of the concepts and keywords specified
# (even though they might have the category about renewable energy, that will not be enough
# for an article to be among the results)
topic.restrictToSetConceptsAndKeywords(True)
# limit results to English, German and Spanish results
topic.setLanguages(["eng", "deu", "spa"])
# get results that are at most 3 days old
topic.setMaxDaysBack(3)
# require that the articles that will be returned should get at least a total score of 30 points or more
# based on the specified list of conditions
topic.setArticleThreshold(30)
# get first page of articles sorted by date (from most recent backward) to the topic page
arts1 = topic.getArticles(page=1,
sortBy="date",
returnInfo=ReturnInfo(
articleInfo = ArticleInfoFlags(concepts=True, categories=True)
))
for art in arts1.get("articles", {}).get("results", []):
print(art)
|
[
"def",
"createTopicPage2",
"(",
")",
":",
"topic",
"=",
"TopicPage",
"(",
"er",
")",
"topic",
".",
"addCategory",
"(",
"er",
".",
"getCategoryUri",
"(",
"\"renewable\"",
")",
",",
"50",
")",
"topic",
".",
"addKeyword",
"(",
"\"renewable energy\"",
",",
"30",
")",
"topic",
".",
"addConcept",
"(",
"er",
".",
"getConceptUri",
"(",
"\"biofuel\"",
")",
",",
"50",
")",
"topic",
".",
"addConcept",
"(",
"er",
".",
"getConceptUri",
"(",
"\"solar energy\"",
")",
",",
"50",
")",
"# require that the results will mention at least one of the concepts and keywords specified",
"# (even though they might have the category about renewable energy, that will not be enough",
"# for an article to be among the results)",
"topic",
".",
"restrictToSetConceptsAndKeywords",
"(",
"True",
")",
"# limit results to English, German and Spanish results",
"topic",
".",
"setLanguages",
"(",
"[",
"\"eng\"",
",",
"\"deu\"",
",",
"\"spa\"",
"]",
")",
"# get results that are at most 3 days old",
"topic",
".",
"setMaxDaysBack",
"(",
"3",
")",
"# require that the articles that will be returned should get at least a total score of 30 points or more",
"# based on the specified list of conditions",
"topic",
".",
"setArticleThreshold",
"(",
"30",
")",
"# get first page of articles sorted by date (from most recent backward) to the topic page",
"arts1",
"=",
"topic",
".",
"getArticles",
"(",
"page",
"=",
"1",
",",
"sortBy",
"=",
"\"date\"",
",",
"returnInfo",
"=",
"ReturnInfo",
"(",
"articleInfo",
"=",
"ArticleInfoFlags",
"(",
"concepts",
"=",
"True",
",",
"categories",
"=",
"True",
")",
")",
")",
"for",
"art",
"in",
"arts1",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"results\"",
",",
"[",
"]",
")",
":",
"print",
"(",
"art",
")"
] |
create a topic page directly, set the article threshold, restrict results to set concepts and keywords
|
[
"create",
"a",
"topic",
"page",
"directly",
"set",
"the",
"article",
"threshold",
"restrict",
"results",
"to",
"set",
"concepts",
"and",
"keywords"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/examples/TopicPagesExamples.py#L30-L63
|
15,277
|
EventRegistry/event-registry-python
|
eventregistry/QueryEvent.py
|
QueryEventArticlesIter.count
|
def count(self, eventRegistry):
"""
return the number of articles that match the criteria
@param eventRegistry: instance of EventRegistry class. used to obtain the necessary data
"""
self.setRequestedResult(RequestEventArticles(**self.queryParams))
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get(self.queryParams["eventUri"], {}).get("articles", {}).get("totalResults", 0)
return count
|
python
|
def count(self, eventRegistry):
"""
return the number of articles that match the criteria
@param eventRegistry: instance of EventRegistry class. used to obtain the necessary data
"""
self.setRequestedResult(RequestEventArticles(**self.queryParams))
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get(self.queryParams["eventUri"], {}).get("articles", {}).get("totalResults", 0)
return count
|
[
"def",
"count",
"(",
"self",
",",
"eventRegistry",
")",
":",
"self",
".",
"setRequestedResult",
"(",
"RequestEventArticles",
"(",
"*",
"*",
"self",
".",
"queryParams",
")",
")",
"res",
"=",
"eventRegistry",
".",
"execQuery",
"(",
"self",
")",
"if",
"\"error\"",
"in",
"res",
":",
"print",
"(",
"res",
"[",
"\"error\"",
"]",
")",
"count",
"=",
"res",
".",
"get",
"(",
"self",
".",
"queryParams",
"[",
"\"eventUri\"",
"]",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"totalResults\"",
",",
"0",
")",
"return",
"count"
] |
return the number of articles that match the criteria
@param eventRegistry: instance of EventRegistry class. used to obtain the necessary data
|
[
"return",
"the",
"number",
"of",
"articles",
"that",
"match",
"the",
"criteria"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryEvent.py#L150-L160
|
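A sketch for QueryEventArticlesIter.count; the event URI is a placeholder, and the single-URI constructor form is assumed from the class name:

from eventregistry import EventRegistry, QueryEventArticlesIter

er = EventRegistry(apiKey="YOUR_API_KEY")    # placeholder key
it = QueryEventArticlesIter("eng-1234567")   # placeholder event URI
print(it.count(er))                          # total number of matching articles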
15,278
|
EventRegistry/event-registry-python
|
eventregistry/QueryArticles.py
|
QueryArticles.initWithComplexQuery
|
def initWithComplexQuery(query):
"""
create a query using a complex article query
"""
q = QueryArticles()
# provided an instance of ComplexArticleQuery
if isinstance(query, ComplexArticleQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
foo = json.loads(query)
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
else:
assert False, "The instance of query parameter was not a ComplexArticleQuery, a string or a python dict"
return q
|
python
|
def initWithComplexQuery(query):
"""
create a query using a complex article query
"""
q = QueryArticles()
# provided an instance of ComplexArticleQuery
if isinstance(query, ComplexArticleQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
foo = json.loads(query)
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
else:
assert False, "The instance of query parameter was not a ComplexArticleQuery, a string or a python dict"
return q
|
[
"def",
"initWithComplexQuery",
"(",
"query",
")",
":",
"q",
"=",
"QueryArticles",
"(",
")",
"# provided an instance of ComplexArticleQuery",
"if",
"isinstance",
"(",
"query",
",",
"ComplexArticleQuery",
")",
":",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"json",
".",
"dumps",
"(",
"query",
".",
"getQuery",
"(",
")",
")",
")",
"# provided query as a string containing the json object",
"elif",
"isinstance",
"(",
"query",
",",
"six",
".",
"string_types",
")",
":",
"foo",
"=",
"json",
".",
"loads",
"(",
"query",
")",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"query",
")",
"# provided query as a python dict",
"elif",
"isinstance",
"(",
"query",
",",
"dict",
")",
":",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"json",
".",
"dumps",
"(",
"query",
")",
")",
"else",
":",
"assert",
"False",
",",
"\"The instance of query parameter was not a ComplexArticleQuery, a string or a python dict\"",
"return",
"q"
] |
create a query using a complex article query
|
[
"create",
"a",
"query",
"using",
"a",
"complex",
"article",
"query"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryArticles.py#L218-L235
|
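A sketch of the three accepted input forms for QueryArticles.initWithComplexQuery; the "$query" structure is an assumed example of the advanced query shape, not taken from this record:

import json

payload = {"$query": {"keyword": "renewable energy"}}         # assumed shape
q1 = QueryArticles.initWithComplexQuery(payload)              # python dict
q2 = QueryArticles.initWithComplexQuery(json.dumps(payload))  # JSON string
# a malformed string fails early, since the body json.loads-es it first:
# QueryArticles.initWithComplexQuery("not json")  -> raises ValueError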
15,279
|
EventRegistry/event-registry-python
|
eventregistry/QueryArticles.py
|
QueryArticlesIter._getNextArticleBatch
|
def _getNextArticleBatch(self):
"""download next batch of articles based on the article uris in the uri list"""
# try to get more uris, if none
self._articlePage += 1
# if we have already obtained all pages, then exit
if self._totalPages != None and self._articlePage > self._totalPages:
return
self.setRequestedResult(RequestArticlesInfo(page=self._articlePage,
sortBy=self._sortBy, sortByAsc=self._sortByAsc,
returnInfo = self._returnInfo))
if self._er._verboseOutput:
print("Downloading article page %d..." % (self._articlePage))
res = self._er.execQuery(self)
if "error" in res:
print("Error while obtaining a list of articles: " + res["error"])
else:
self._totalPages = res.get("articles", {}).get("pages", 0)
results = res.get("articles", {}).get("results", [])
self._articleList.extend(results)
|
python
|
def _getNextArticleBatch(self):
"""download next batch of articles based on the article uris in the uri list"""
# try to get more uris, if none
self._articlePage += 1
# if we have already obtained all pages, then exit
if self._totalPages != None and self._articlePage > self._totalPages:
return
self.setRequestedResult(RequestArticlesInfo(page=self._articlePage,
sortBy=self._sortBy, sortByAsc=self._sortByAsc,
returnInfo = self._returnInfo))
if self._er._verboseOutput:
print("Downloading article page %d..." % (self._articlePage))
res = self._er.execQuery(self)
if "error" in res:
print("Error while obtaining a list of articles: " + res["error"])
else:
self._totalPages = res.get("articles", {}).get("pages", 0)
results = res.get("articles", {}).get("results", [])
self._articleList.extend(results)
|
[
"def",
"_getNextArticleBatch",
"(",
"self",
")",
":",
"# try to get more uris, if none",
"self",
".",
"_articlePage",
"+=",
"1",
"# if we have already obtained all pages, then exit",
"if",
"self",
".",
"_totalPages",
"!=",
"None",
"and",
"self",
".",
"_articlePage",
">",
"self",
".",
"_totalPages",
":",
"return",
"self",
".",
"setRequestedResult",
"(",
"RequestArticlesInfo",
"(",
"page",
"=",
"self",
".",
"_articlePage",
",",
"sortBy",
"=",
"self",
".",
"_sortBy",
",",
"sortByAsc",
"=",
"self",
".",
"_sortByAsc",
",",
"returnInfo",
"=",
"self",
".",
"_returnInfo",
")",
")",
"if",
"self",
".",
"_er",
".",
"_verboseOutput",
":",
"print",
"(",
"\"Downloading article page %d...\"",
"%",
"(",
"self",
".",
"_articlePage",
")",
")",
"res",
"=",
"self",
".",
"_er",
".",
"execQuery",
"(",
"self",
")",
"if",
"\"error\"",
"in",
"res",
":",
"print",
"(",
"\"Error while obtaining a list of articles: \"",
"+",
"res",
"[",
"\"error\"",
"]",
")",
"else",
":",
"self",
".",
"_totalPages",
"=",
"res",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"pages\"",
",",
"0",
")",
"results",
"=",
"res",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"results\"",
",",
"[",
"]",
")",
"self",
".",
"_articleList",
".",
"extend",
"(",
"results",
")"
] |
download next batch of articles based on the article uris in the uri list
|
[
"download",
"next",
"batch",
"of",
"articles",
"based",
"on",
"the",
"article",
"uris",
"in",
"the",
"uri",
"list"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryArticles.py#L317-L335
|
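_getNextArticleBatch is internal paging machinery; callers normally drive it through the iterator interface. A sketch, assuming QueryArticlesIter accepts the usual QueryArticles keyword arguments and exposes an execQuery(er, ...) entry point as elsewhere in this file:

from eventregistry import EventRegistry, QueryArticlesIter

er = EventRegistry(apiKey="YOUR_API_KEY")    # placeholder key
it = QueryArticlesIter(keywords="solar power")
# iterating pulls page after page via _getNextArticleBatch behind the scenes
for art in it.execQuery(er, sortBy="date", maxItems=50):
    print(art["uri"])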
15,280
|
EventRegistry/event-registry-python
|
eventregistry/QueryEvents.py
|
QueryEvents.initWithComplexQuery
|
def initWithComplexQuery(query):
"""
create a query using a complex event query
"""
q = QueryEvents()
# provided an instance of ComplexEventQuery
if isinstance(query, ComplexEventQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
foo = json.loads(query)
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
# unrecognized value provided
else:
assert False, "The instance of query parameter was not a ComplexEventQuery, a string or a python dict"
return q
|
python
|
def initWithComplexQuery(query):
"""
create a query using a complex event query
"""
q = QueryEvents()
# provided an instance of ComplexEventQuery
if isinstance(query, ComplexEventQuery):
q._setVal("query", json.dumps(query.getQuery()))
# provided query as a string containing the json object
elif isinstance(query, six.string_types):
foo = json.loads(query)
q._setVal("query", query)
# provided query as a python dict
elif isinstance(query, dict):
q._setVal("query", json.dumps(query))
# unrecognized value provided
else:
assert False, "The instance of query parameter was not a ComplexEventQuery, a string or a python dict"
return q
|
[
"def",
"initWithComplexQuery",
"(",
"query",
")",
":",
"q",
"=",
"QueryEvents",
"(",
")",
"# provided an instance of ComplexEventQuery",
"if",
"isinstance",
"(",
"query",
",",
"ComplexEventQuery",
")",
":",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"json",
".",
"dumps",
"(",
"query",
".",
"getQuery",
"(",
")",
")",
")",
"# provided query as a string containing the json object",
"elif",
"isinstance",
"(",
"query",
",",
"six",
".",
"string_types",
")",
":",
"foo",
"=",
"json",
".",
"loads",
"(",
"query",
")",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"query",
")",
"# provided query as a python dict",
"elif",
"isinstance",
"(",
"query",
",",
"dict",
")",
":",
"q",
".",
"_setVal",
"(",
"\"query\"",
",",
"json",
".",
"dumps",
"(",
"query",
")",
")",
"# unrecognized value provided",
"else",
":",
"assert",
"False",
",",
"\"The instance of query parameter was not a ComplexEventQuery, a string or a python dict\"",
"return",
"q"
] |
create a query using a complex event query
|
[
"create",
"a",
"query",
"using",
"a",
"complex",
"event",
"query"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryEvents.py#L183-L201
|
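The event-side twin of the article helper above accepts the same three input forms; a sketch of the string path, which the foo = json.loads(query) line validates before use ("$query" again being an assumed shape):

import json

q = QueryEvents.initWithComplexQuery(
    json.dumps({"$query": {"keyword": "earthquake"}}))
# passing invalid JSON raises ValueError from json.loads before any request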
15,281
|
EventRegistry/event-registry-python
|
eventregistry/QueryEvents.py
|
QueryEventsIter.count
|
def count(self, eventRegistry):
"""
return the number of events that match the criteria
"""
self.setRequestedResult(RequestEventsInfo())
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get("events", {}).get("totalResults", 0)
return count
|
python
|
def count(self, eventRegistry):
"""
return the number of events that match the criteria
"""
self.setRequestedResult(RequestEventsInfo())
res = eventRegistry.execQuery(self)
if "error" in res:
print(res["error"])
count = res.get("events", {}).get("totalResults", 0)
return count
|
[
"def",
"count",
"(",
"self",
",",
"eventRegistry",
")",
":",
"self",
".",
"setRequestedResult",
"(",
"RequestEventsInfo",
"(",
")",
")",
"res",
"=",
"eventRegistry",
".",
"execQuery",
"(",
"self",
")",
"if",
"\"error\"",
"in",
"res",
":",
"print",
"(",
"res",
"[",
"\"error\"",
"]",
")",
"count",
"=",
"res",
".",
"get",
"(",
"\"events\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"totalResults\"",
",",
"0",
")",
"return",
"count"
] |
return the number of events that match the criteria
|
[
"return",
"the",
"number",
"of",
"events",
"that",
"match",
"the",
"criteria"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/QueryEvents.py#L211-L220
|
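A sketch for QueryEventsIter.count, assuming the iterator accepts the usual QueryEvents constructor keywords and er is set up as in the earlier sketches:

it = QueryEventsIter(keywords="earthquake")
print(it.count(er))   # total number of matching events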
15,282
|
EventRegistry/event-registry-python
|
eventregistry/ReturnInfo.py
|
ReturnInfoFlagsBase._setFlag
|
def _setFlag(self, name, val, defVal):
"""set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal"""
if not hasattr(self, "flags"):
self.flags = {}
if val != defVal:
self.flags[name] = val
|
python
|
def _setFlag(self, name, val, defVal):
"""set the objects property propName if the dictKey key exists in dict and it is not the same as default value defVal"""
if not hasattr(self, "flags"):
self.flags = {}
if val != defVal:
self.flags[name] = val
|
[
"def",
"_setFlag",
"(",
"self",
",",
"name",
",",
"val",
",",
"defVal",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"flags\"",
")",
":",
"self",
".",
"flags",
"=",
"{",
"}",
"if",
"val",
"!=",
"defVal",
":",
"self",
".",
"flags",
"[",
"name",
"]",
"=",
"val"
] |
set the flag name to val if val differs from the default value defVal
|
[
"set",
"the",
"objects",
"property",
"propName",
"if",
"the",
"dictKey",
"key",
"exists",
"in",
"dict",
"and",
"it",
"is",
"not",
"the",
"same",
"as",
"default",
"value",
"defVal"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L15-L20
|
15,283
|
EventRegistry/event-registry-python
|
eventregistry/ReturnInfo.py
|
ReturnInfoFlagsBase._setVal
|
def _setVal(self, name, val, defVal = None):
"""set value of name to val in case the val != defVal"""
if val == defVal:
return
if not hasattr(self, "vals"):
self.vals = {}
self.vals[name] = val
|
python
|
def _setVal(self, name, val, defVal = None):
"""set value of name to val in case the val != defVal"""
if val == defVal:
return
if not hasattr(self, "vals"):
self.vals = {}
self.vals[name] = val
|
[
"def",
"_setVal",
"(",
"self",
",",
"name",
",",
"val",
",",
"defVal",
"=",
"None",
")",
":",
"if",
"val",
"==",
"defVal",
":",
"return",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"vals\"",
")",
":",
"self",
".",
"vals",
"=",
"{",
"}",
"self",
".",
"vals",
"[",
"name",
"]",
"=",
"val"
] |
set value of name to val in case the val != defVal
|
[
"set",
"value",
"of",
"name",
"to",
"val",
"in",
"case",
"the",
"val",
"!",
"=",
"defVal"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L30-L36
|
15,284
|
EventRegistry/event-registry-python
|
eventregistry/ReturnInfo.py
|
ReturnInfoFlagsBase._getVals
|
def _getVals(self, prefix = ""):
"""
return the values in the vals dict
in case prefix is "", change the first letter of the name to lowercase, otherwise use prefix+name as the new name
"""
if not hasattr(self, "vals"):
self.vals = {}
dict = {}
for key in list(self.vals.keys()):
# if no prefix then lower the first letter
if prefix == "":
newkey = key[:1].lower() + key[1:] if key else ""
dict[newkey] = self.vals[key]
else:
newkey = key[:1].upper() + key[1:] if key else ""
dict[prefix + newkey] = self.vals[key]
return dict
|
python
|
def _getVals(self, prefix = ""):
"""
return the values in the vals dict
in case prefix is "", change the first letter of the name to lowercase, otherwise use prefix+name as the new name
"""
if not hasattr(self, "vals"):
self.vals = {}
dict = {}
for key in list(self.vals.keys()):
# if no prefix then lower the first letter
if prefix == "":
newkey = key[:1].lower() + key[1:] if key else ""
dict[newkey] = self.vals[key]
else:
newkey = key[:1].upper() + key[1:] if key else ""
dict[prefix + newkey] = self.vals[key]
return dict
|
[
"def",
"_getVals",
"(",
"self",
",",
"prefix",
"=",
"\"\"",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"vals\"",
")",
":",
"self",
".",
"vals",
"=",
"{",
"}",
"dict",
"=",
"{",
"}",
"for",
"key",
"in",
"list",
"(",
"self",
".",
"vals",
".",
"keys",
"(",
")",
")",
":",
"# if no prefix then lower the first letter",
"if",
"prefix",
"==",
"\"\"",
":",
"newkey",
"=",
"key",
"[",
":",
"1",
"]",
".",
"lower",
"(",
")",
"+",
"key",
"[",
"1",
":",
"]",
"if",
"key",
"else",
"\"\"",
"dict",
"[",
"newkey",
"]",
"=",
"self",
".",
"vals",
"[",
"key",
"]",
"else",
":",
"newkey",
"=",
"key",
"[",
":",
"1",
"]",
".",
"upper",
"(",
")",
"+",
"key",
"[",
"1",
":",
"]",
"if",
"key",
"else",
"\"\"",
"dict",
"[",
"prefix",
"+",
"newkey",
"]",
"=",
"self",
".",
"vals",
"[",
"key",
"]",
"return",
"dict"
] |
return the values in the vals dict
in case prefix is "", change the first letter of the name to lowercase, otherwise use prefix+name as the new name
|
[
"return",
"the",
"values",
"in",
"the",
"vals",
"dict",
"in",
"case",
"prefix",
"is",
"change",
"the",
"first",
"letter",
"of",
"the",
"name",
"to",
"lowercase",
"otherwise",
"use",
"prefix",
"+",
"name",
"as",
"the",
"new",
"name"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L39-L55
|
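A sketch of the key-naming rule implemented above, assuming ReturnInfoFlagsBase can be instantiated directly and its helpers called as in this file:

f = ReturnInfoFlagsBase()
f._setVal("BodyLen", 300)
print(f._getVals())           # {'bodyLen': 300}        - first letter lowered
print(f._getVals("article"))  # {'articleBodyLen': 300} - prefixed, first letter raised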
15,285
|
EventRegistry/event-registry-python
|
eventregistry/ReturnInfo.py
|
ReturnInfo.loadFromFile
|
def loadFromFile(fileName):
"""
load the configuration for the ReturnInfo from a fileName
@param fileName: filename that contains the json configuration to use in the ReturnInfo
"""
assert os.path.exists(fileName), "File " + fileName + " does not exist"
conf = json.load(open(fileName))
return ReturnInfo(
articleInfo=ArticleInfoFlags(**conf.get("articleInfo", {})),
eventInfo=EventInfoFlags(**conf.get("eventInfo", {})),
sourceInfo=SourceInfoFlags(**conf.get("sourceInfo", {})),
categoryInfo=CategoryInfoFlags(**conf.get("categoryInfo", {})),
conceptInfo=ConceptInfoFlags(**conf.get("conceptInfo", {})),
locationInfo=LocationInfoFlags(**conf.get("locationInfo", {})),
storyInfo=StoryInfoFlags(**conf.get("storyInfo", {})),
conceptClassInfo=ConceptClassInfoFlags(**conf.get("conceptClassInfo", {})),
conceptFolderInfo=ConceptFolderInfoFlags(**conf.get("conceptFolderInfo", {}))
)
|
python
|
def loadFromFile(fileName):
"""
load the configuration for the ReturnInfo from a fileName
@param fileName: filename that contains the json configuration to use in the ReturnInfo
"""
assert os.path.exists(fileName), "File " + fileName + " does not exist"
conf = json.load(open(fileName))
return ReturnInfo(
articleInfo=ArticleInfoFlags(**conf.get("articleInfo", {})),
eventInfo=EventInfoFlags(**conf.get("eventInfo", {})),
sourceInfo=SourceInfoFlags(**conf.get("sourceInfo", {})),
categoryInfo=CategoryInfoFlags(**conf.get("categoryInfo", {})),
conceptInfo=ConceptInfoFlags(**conf.get("conceptInfo", {})),
locationInfo=LocationInfoFlags(**conf.get("locationInfo", {})),
storyInfo=StoryInfoFlags(**conf.get("storyInfo", {})),
conceptClassInfo=ConceptClassInfoFlags(**conf.get("conceptClassInfo", {})),
conceptFolderInfo=ConceptFolderInfoFlags(**conf.get("conceptFolderInfo", {}))
)
|
[
"def",
"loadFromFile",
"(",
"fileName",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"fileName",
")",
",",
"\"File \"",
"+",
"fileName",
"+",
"\" does not exist\"",
"conf",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"fileName",
")",
")",
"return",
"ReturnInfo",
"(",
"articleInfo",
"=",
"ArticleInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"articleInfo\"",
",",
"{",
"}",
")",
")",
",",
"eventInfo",
"=",
"EventInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"eventInfo\"",
",",
"{",
"}",
")",
")",
",",
"sourceInfo",
"=",
"SourceInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"sourceInfo\"",
",",
"{",
"}",
")",
")",
",",
"categoryInfo",
"=",
"CategoryInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"categoryInfo\"",
",",
"{",
"}",
")",
")",
",",
"conceptInfo",
"=",
"ConceptInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"conceptInfo\"",
",",
"{",
"}",
")",
")",
",",
"locationInfo",
"=",
"LocationInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"locationInfo\"",
",",
"{",
"}",
")",
")",
",",
"storyInfo",
"=",
"StoryInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"storyInfo\"",
",",
"{",
"}",
")",
")",
",",
"conceptClassInfo",
"=",
"ConceptClassInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"conceptClassInfo\"",
",",
"{",
"}",
")",
")",
",",
"conceptFolderInfo",
"=",
"ConceptFolderInfoFlags",
"(",
"*",
"*",
"conf",
".",
"get",
"(",
"\"conceptFolderInfo\"",
",",
"{",
"}",
")",
")",
")"
] |
load the configuration for the ReturnInfo from a fileName
@param fileName: filename that contains the json configuration to use in the ReturnInfo
|
[
"load",
"the",
"configuration",
"for",
"the",
"ReturnInfo",
"from",
"a",
"fileName"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/ReturnInfo.py#L453-L470
|
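A sketch for ReturnInfo.loadFromFile; the file name and its contents are illustrative. Any section missing from the JSON falls back to {} via conf.get, as the body above shows:

# settings.json (illustrative contents):
# {"articleInfo": {"concepts": true, "categories": true}}
ri = ReturnInfo.loadFromFile("settings.json")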
15,286
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.loadTopicPageFromER
|
def loadTopicPageFromER(self, uri):
"""
load an existing topic page from Event Registry based on the topic page URI
@param uri: uri of the topic page saved in your Event Registry account
"""
params = {
"action": "getTopicPageJson",
"includeConceptDescription": True,
"includeTopicPageDefinition": True,
"includeTopicPageOwner": True,
"uri": uri
}
self.topicPage = self._createEmptyTopicPage()
self.concept = self.eventRegistry.jsonRequest("/json/topicPage", params)
self.topicPage.update(self.concept.get("topicPage", {}))
|
python
|
def loadTopicPageFromER(self, uri):
"""
load an existing topic page from Event Registry based on the topic page URI
@param uri: uri of the topic page saved in your Event Registry account
"""
params = {
"action": "getTopicPageJson",
"includeConceptDescription": True,
"includeTopicPageDefinition": True,
"includeTopicPageOwner": True,
"uri": uri
}
self.topicPage = self._createEmptyTopicPage()
self.concept = self.eventRegistry.jsonRequest("/json/topicPage", params)
self.topicPage.update(self.concept.get("topicPage", {}))
|
[
"def",
"loadTopicPageFromER",
"(",
"self",
",",
"uri",
")",
":",
"params",
"=",
"{",
"\"action\"",
":",
"\"getTopicPageJson\"",
",",
"\"includeConceptDescription\"",
":",
"True",
",",
"\"includeTopicPageDefinition\"",
":",
"True",
",",
"\"includeTopicPageOwner\"",
":",
"True",
",",
"\"uri\"",
":",
"uri",
"}",
"self",
".",
"topicPage",
"=",
"self",
".",
"_createEmptyTopicPage",
"(",
")",
"self",
".",
"concept",
"=",
"self",
".",
"eventRegistry",
".",
"jsonRequest",
"(",
"\"/json/topicPage\"",
",",
"params",
")",
"self",
".",
"topicPage",
".",
"update",
"(",
"self",
".",
"concept",
".",
"get",
"(",
"\"topicPage\"",
",",
"{",
"}",
")",
")"
] |
load an existing topic page from Event Registry based on the topic page URI
@param uri: uri of the topic page saved in your Event Registry account
|
[
"load",
"an",
"existing",
"topic",
"page",
"from",
"Event",
"Registry",
"based",
"on",
"the",
"topic",
"page",
"URI"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L51-L65
|
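A sketch for TopicPage.loadTopicPageFromER; the topic page URI is a placeholder for one saved in your Event Registry account, and er is assumed as in the earlier sketches:

topic = TopicPage(er)
topic.loadTopicPageFromER("topic-page-uri-placeholder")
arts = topic.getArticles(page=1)   # query using the loaded definition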
15,287
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.loadTopicPageFromFile
|
def loadTopicPageFromFile(self, fname):
"""
load topic page from an existing file
"""
assert os.path.exists(fname)
f = open(fname, "r", encoding="utf-8")
self.topicPage = json.load(f)
|
python
|
def loadTopicPageFromFile(self, fname):
"""
load topic page from an existing file
"""
assert os.path.exists(fname)
f = open(fname, "r", encoding="utf-8")
self.topicPage = json.load(f)
|
[
"def",
"loadTopicPageFromFile",
"(",
"self",
",",
"fname",
")",
":",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"fname",
")",
"f",
"=",
"open",
"(",
"fname",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"self",
".",
"topicPage",
"=",
"json",
".",
"load",
"(",
"f",
")"
] |
load topic page from an existing file
|
[
"load",
"topic",
"page",
"from",
"an",
"existing",
"file"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L76-L82
|
15,288
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.saveTopicPageDefinitionToFile
|
def saveTopicPageDefinitionToFile(self, fname):
"""
save the topic page definition to a file
"""
open(fname, "w", encoding="utf-8").write(json.dumps(self.topicPage, indent = 4, sort_keys = True))
|
python
|
def saveTopicPageDefinitionToFile(self, fname):
"""
save the topic page definition to a file
"""
open(fname, "w", encoding="utf-8").write(json.dumps(self.topicPage, indent = 4, sort_keys = True))
|
[
"def",
"saveTopicPageDefinitionToFile",
"(",
"self",
",",
"fname",
")",
":",
"open",
"(",
"fname",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"self",
".",
"topicPage",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")",
")"
] |
save the topic page definition to a file
|
[
"save",
"the",
"topic",
"page",
"definition",
"to",
"a",
"file"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L92-L96
|
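A round-trip sketch combining the save method above with loadTopicPageFromFile from the previous record; the file name is illustrative:

topic.saveTopicPageDefinitionToFile("my_topic.json")   # pretty-printed, sorted JSON
restored = TopicPage(er)
restored.loadTopicPageFromFile("my_topic.json")        # same definition back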
15,289
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.setArticleThreshold
|
def setArticleThreshold(self, value):
"""
what is the minimum total weight that an article has to have in order to get it among the results?
@param value: threshold to use
"""
assert isinstance(value, int)
assert value >= 0
self.topicPage["articleTreshWgt"] = value
|
python
|
def setArticleThreshold(self, value):
"""
what is the minimum total weight that an article has to have in order to get it among the results?
@param value: threshold to use
"""
assert isinstance(value, int)
assert value >= 0
self.topicPage["articleTreshWgt"] = value
|
[
"def",
"setArticleThreshold",
"(",
"self",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"int",
")",
"assert",
"value",
">=",
"0",
"self",
".",
"topicPage",
"[",
"\"articleTreshWgt\"",
"]",
"=",
"value"
] |
what is the minimum total weight that an article has to have in order to get it among the results?
@param value: threshold to use
|
[
"what",
"is",
"the",
"minimum",
"total",
"weight",
"that",
"an",
"article",
"has",
"to",
"have",
"in",
"order",
"to",
"get",
"it",
"among",
"the",
"results?"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L102-L109
|
15,290
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.setEventThreshold
|
def setEventThreshold(self, value):
"""
what is the minimum total weight that an event has to have in order to get it among the results?
@param value: threshold to use
"""
assert isinstance(value, int)
assert value >= 0
self.topicPage["eventTreshWgt"] = value
|
python
|
def setEventThreshold(self, value):
"""
what is the minimum total weight that an event has to have in order to get it among the results?
@param value: threshold to use
"""
assert isinstance(value, int)
assert value >= 0
self.topicPage["eventTreshWgt"] = value
|
[
"def",
"setEventThreshold",
"(",
"self",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"int",
")",
"assert",
"value",
">=",
"0",
"self",
".",
"topicPage",
"[",
"\"eventTreshWgt\"",
"]",
"=",
"value"
] |
what is the minimum total weight that an event has to have in order to get it among the results?
@param value: threshold to use
|
[
"what",
"is",
"the",
"minimum",
"total",
"weight",
"that",
"an",
"event",
"has",
"to",
"have",
"in",
"order",
"to",
"get",
"it",
"among",
"the",
"results?"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L112-L119
|
15,291
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.setMaxDaysBack
|
def setMaxDaysBack(self, maxDaysBack):
"""
what is the maximum allowed age of the results?
"""
assert isinstance(maxDaysBack, int), "maxDaysBack value has to be a positive integer"
assert maxDaysBack >= 1
self.topicPage["maxDaysBack"] = maxDaysBack
|
python
|
def setMaxDaysBack(self, maxDaysBack):
"""
what is the maximum allowed age of the results?
"""
assert isinstance(maxDaysBack, int), "maxDaysBack value has to be a positive integer"
assert maxDaysBack >= 1
self.topicPage["maxDaysBack"] = maxDaysBack
|
[
"def",
"setMaxDaysBack",
"(",
"self",
",",
"maxDaysBack",
")",
":",
"assert",
"isinstance",
"(",
"maxDaysBack",
",",
"int",
")",
",",
"\"maxDaysBack value has to be a positive integer\"",
"assert",
"maxDaysBack",
">=",
"1",
"self",
".",
"topicPage",
"[",
"\"maxDaysBack\"",
"]",
"=",
"maxDaysBack"
] |
what is the maximum allowed age of the results?
|
[
"what",
"is",
"the",
"maximum",
"allowed",
"age",
"of",
"the",
"results?"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L164-L170
|
15,292
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.addConcept
|
def addConcept(self, conceptUri, weight, label = None, conceptType = None):
"""
add a relevant concept to the topic page
@param conceptUri: uri of the concept to be added
@param weight: importance of the provided concept (typically in range 1 - 50)
"""
        assert isinstance(weight, (float, int)), "weight value has to be a number (int or float)"
concept = {"uri": conceptUri, "wgt": weight}
if label != None: concept["label"] = label
if conceptType != None: concept["type"] = conceptType
self.topicPage["concepts"].append(concept)
|
python
|
def addConcept(self, conceptUri, weight, label = None, conceptType = None):
"""
add a relevant concept to the topic page
@param conceptUri: uri of the concept to be added
@param weight: importance of the provided concept (typically in range 1 - 50)
"""
        assert isinstance(weight, (float, int)), "weight value has to be a number (int or float)"
concept = {"uri": conceptUri, "wgt": weight}
if label != None: concept["label"] = label
if conceptType != None: concept["type"] = conceptType
self.topicPage["concepts"].append(concept)
|
[
"def",
"addConcept",
"(",
"self",
",",
"conceptUri",
",",
"weight",
",",
"label",
"=",
"None",
",",
"conceptType",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"concept",
"=",
"{",
"\"uri\"",
":",
"conceptUri",
",",
"\"wgt\"",
":",
"weight",
"}",
"if",
"label",
"!=",
"None",
":",
"concept",
"[",
"\"label\"",
"]",
"=",
"label",
"if",
"conceptType",
"!=",
"None",
":",
"concept",
"[",
"\"type\"",
"]",
"=",
"conceptType",
"self",
".",
"topicPage",
"[",
"\"concepts\"",
"]",
".",
"append",
"(",
"concept",
")"
] |
add a relevant concept to the topic page
@param conceptUri: uri of the concept to be added
@param weight: importance of the provided concept (typically in range 1 - 50)
|
[
"add",
"a",
"relevant",
"concept",
"to",
"the",
"topic",
"page"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L211-L221
|
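A hedged sketch of addConcept showing the optional label and conceptType arguments; the Wikipedia-style concept URIs are illustrative placeholders, not values taken from the dataset:

page.addConcept("http://en.wikipedia.org/wiki/Tesla,_Inc.", 50, label="Tesla", conceptType="org")
page.addConcept("http://en.wikipedia.org/wiki/Lithium", -20)  # a negative weight down-ranks matching content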
15,293
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.addKeyword
|
def addKeyword(self, keyword, weight):
"""
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["keywords"].append({"keyword": keyword, "wgt": weight})
|
python
|
def addKeyword(self, keyword, weight):
"""
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["keywords"].append({"keyword": keyword, "wgt": weight})
|
[
"def",
"addKeyword",
"(",
"self",
",",
"keyword",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"keywords\"",
"]",
".",
"append",
"(",
"{",
"\"keyword\"",
":",
"keyword",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] |
add a relevant keyword to the topic page
@param keyword: keyword or phrase to be added
@param weight: importance of the provided keyword (typically in range 1 - 50)
|
[
"add",
"a",
"relevant",
"keyword",
"to",
"the",
"topic",
"page"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L224-L231
|
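A hedged sketch of addKeyword with both a positive and a negative weight (the assert accepts either sign); page is an assumed TopicPage instance:

page.addKeyword("electric vehicle", 30)
page.addKeyword("recall", -10)  # penalize results containing this phrase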
15,294
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.addCategory
|
def addCategory(self, categoryUri, weight):
"""
add a relevant category to the topic page
@param categoryUri: uri of the category to be added
@param weight: importance of the provided category (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["categories"].append({"uri": categoryUri, "wgt": weight})
|
python
|
def addCategory(self, categoryUri, weight):
"""
add a relevant category to the topic page
@param categoryUri: uri of the category to be added
@param weight: importance of the provided category (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["categories"].append({"uri": categoryUri, "wgt": weight})
|
[
"def",
"addCategory",
"(",
"self",
",",
"categoryUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"categories\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"categoryUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] |
add a relevant category to the topic page
@param categoryUri: uri of the category to be added
@param weight: importance of the provided category (typically in range 1 - 50)
|
[
"add",
"a",
"relevant",
"category",
"to",
"the",
"topic",
"page"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L234-L241
|
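A hedged sketch of addCategory; the DMOZ-style category URI is an illustrative assumption:

page.addCategory("dmoz/Business", 40)  # boost business-related results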
15,295
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.addSource
|
def addSource(self, sourceUri, weight):
"""
add a news source to the topic page
@param sourceUri: uri of the news source to add to the topic page
@param weight: importance of the news source (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sources"].append({"uri": sourceUri, "wgt": weight})
|
python
|
def addSource(self, sourceUri, weight):
"""
add a news source to the topic page
@param sourceUri: uri of the news source to add to the topic page
@param weight: importance of the news source (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sources"].append({"uri": sourceUri, "wgt": weight})
|
[
"def",
"addSource",
"(",
"self",
",",
"sourceUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"sources\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"sourceUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] |
add a news source to the topic page
@param sourceUri: uri of the news source to add to the topic page
@param weight: importance of the news source (typically in range 1 - 50)
|
[
"add",
"a",
"news",
"source",
"to",
"the",
"topic",
"page"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L244-L251
|
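A hedged sketch of addSource; the domain-style source URI is an illustrative assumption:

page.addSource("bbc.co.uk", 25)  # favor content from this news source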
15,296
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.addSourceLocation
|
def addSourceLocation(self, sourceLocationUri, weight):
"""
add a list of relevant sources by identifying them by their geographic location
@param sourceLocationUri: uri of the location where the sources should be geographically located
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sourceLocations"].append({"uri": sourceLocationUri, "wgt": weight})
|
python
|
def addSourceLocation(self, sourceLocationUri, weight):
"""
add a list of relevant sources by identifying them by their geographic location
@param sourceLocationUri: uri of the location where the sources should be geographically located
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sourceLocations"].append({"uri": sourceLocationUri, "wgt": weight})
|
[
"def",
"addSourceLocation",
"(",
"self",
",",
"sourceLocationUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"sourceLocations\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"sourceLocationUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] |
add a list of relevant sources by identifying them by their geographic location
@param sourceLocationUri: uri of the location where the sources should be geographically located
@param weight: importance of the provided list of sources (typically in range 1 - 50)
|
[
"add",
"a",
"list",
"of",
"relevant",
"sources",
"by",
"identifying",
"them",
"by",
"their",
"geographic",
"location"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L254-L261
|
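A hedged sketch of addSourceLocation; the Wikipedia-style location URI is an illustrative assumption:

page.addSourceLocation("http://en.wikipedia.org/wiki/United_Kingdom", 20)  # favor sources based in the UK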
15,297
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.addSourceGroup
|
def addSourceGroup(self, sourceGroupUri, weight):
"""
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sourceGroups"].append({"uri": sourceGroupUri, "wgt": weight})
|
python
|
def addSourceGroup(self, sourceGroupUri, weight):
"""
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sourceGroups"].append({"uri": sourceGroupUri, "wgt": weight})
|
[
"def",
"addSourceGroup",
"(",
"self",
",",
"sourceGroupUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"sourceGroups\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"sourceGroupUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] |
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
|
[
"add",
"a",
"list",
"of",
"relevant",
"sources",
"by",
"specifying",
"a",
"whole",
"source",
"group",
"to",
"the",
"topic",
"page"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L264-L271
|
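A hedged sketch of addSourceGroup; the group URI below is a placeholder, since the row does not show the expected URI format:

page.addSourceGroup("general/sample_group", 15)  # placeholder source-group URI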
15,298
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.addLocation
|
def addLocation(self, locationUri, weight):
"""
add relevant location to the topic page
@param locationUri: uri of the location to add
@param weight: importance of the provided location (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["locations"].append({"uri": locationUri, "wgt": weight})
|
python
|
def addLocation(self, locationUri, weight):
"""
add relevant location to the topic page
@param locationUri: uri of the location to add
@param weight: importance of the provided location (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["locations"].append({"uri": locationUri, "wgt": weight})
|
[
"def",
"addLocation",
"(",
"self",
",",
"locationUri",
",",
"weight",
")",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"weight value has to be a positive or negative integer\"",
"self",
".",
"topicPage",
"[",
"\"locations\"",
"]",
".",
"append",
"(",
"{",
"\"uri\"",
":",
"locationUri",
",",
"\"wgt\"",
":",
"weight",
"}",
")"
] |
add relevant location to the topic page
@param locationUri: uri of the location to add
@param weight: importance of the provided location (typically in range 1 - 50)
|
[
"add",
"relevant",
"location",
"to",
"the",
"topic",
"page"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L274-L281
|
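A hedged sketch of addLocation; the Wikipedia-style location URI is an illustrative assumption:

page.addLocation("http://en.wikipedia.org/wiki/Berlin", 35)  # boost content about this place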
15,299
|
EventRegistry/event-registry-python
|
eventregistry/TopicPage.py
|
TopicPage.setLanguages
|
def setLanguages(self, languages):
"""
restrict the results to the list of specified languages
"""
if isinstance(languages, six.string_types):
languages = [languages]
for lang in languages:
assert len(lang) == 3, "Expected to get language in ISO3 code"
self.topicPage["langs"] = languages
|
python
|
def setLanguages(self, languages):
"""
restrict the results to the list of specified languages
"""
if isinstance(languages, six.string_types):
languages = [languages]
for lang in languages:
assert len(lang) == 3, "Expected to get language in ISO3 code"
self.topicPage["langs"] = languages
|
[
"def",
"setLanguages",
"(",
"self",
",",
"languages",
")",
":",
"if",
"isinstance",
"(",
"languages",
",",
"six",
".",
"string_types",
")",
":",
"languages",
"=",
"[",
"languages",
"]",
"for",
"lang",
"in",
"languages",
":",
"assert",
"len",
"(",
"lang",
")",
"==",
"3",
",",
"\"Expected to get language in ISO3 code\"",
"self",
".",
"topicPage",
"[",
"\"langs\"",
"]",
"=",
"languages"
] |
restrict the results to the list of specified languages
|
[
"restrict",
"the",
"results",
"to",
"the",
"list",
"of",
"specified",
"languages"
] |
534d20b616de02f5e1cd73665a02d189645dbeb6
|
https://github.com/EventRegistry/event-registry-python/blob/534d20b616de02f5e1cd73665a02d189645dbeb6/eventregistry/TopicPage.py#L284-L292
|
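A minimal usage sketch for setLanguages; per the assert, each language must be a three-letter ISO-3 code, and a single string is wrapped into a list automatically:

page.setLanguages(["eng", "deu"])
page.setLanguages("eng")  # equivalent to ["eng"]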